Enable ngraph sanity test:
 - test that ngraph shared library enables compiling of ngraph application
 - test that ngraph library allows creating network at runtime for inference
engine

Signed-off-by: Yeoh Ee Peng <[email protected]>
---
 lib/oeqa/runtime/cases/ngraph.py                | 31 ++++++++++++
 lib/oeqa/runtime/files/ngraph/ngraph-sample.cpp | 64 +++++++++++++++++++++++++
 lib/oeqa/runtime/miutils/tests/ngraph_test.py   | 30 ++++++++++++
 3 files changed, 125 insertions(+)
 create mode 100644 lib/oeqa/runtime/cases/ngraph.py
 create mode 100644 lib/oeqa/runtime/files/ngraph/ngraph-sample.cpp
 create mode 100644 lib/oeqa/runtime/miutils/tests/ngraph_test.py

diff --git a/lib/oeqa/runtime/cases/ngraph.py b/lib/oeqa/runtime/cases/ngraph.py
new file mode 100644
index 0000000..15396d4
--- /dev/null
+++ b/lib/oeqa/runtime/cases/ngraph.py
@@ -0,0 +1,31 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
+from oeqa.runtime.miutils.tests.ngraph_test import NgraphTest
+
+class Ngraph(OERuntimeTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.ngraph = NgraphTest(OEQATarget(cls.tc.target))
+        cls.ngraph.setup()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.ngraph.tear_down()
+
+    @OEHasPackage(['ngraph'])
+    @OEHasPackage(['ngraph-dev'])
+    @OEHasPackage(['gcc'])
+    @OEHasPackage(['gcc-symlinks'])
+    @OEHasPackage(['libstdc++-dev'])
+    @OEHasPackage(['binutils'])
+    def test_ngraph_can_compile(self):
+        (status, output) = self.ngraph.test_ngraph_can_compile()
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+
+    @OETestDepends(['ngraph.Ngraph.test_ngraph_can_compile'])
+    def test_ngraph_can_compute_network_for_inference_engine(self):
+        (status, output) = self.ngraph.test_ngraph_can_compute_network_for_inference_engine()
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/files/ngraph/ngraph-sample.cpp b/lib/oeqa/runtime/files/ngraph/ngraph-sample.cpp
new file mode 100644
index 0000000..5ac9b36
--- /dev/null
+++ b/lib/oeqa/runtime/files/ngraph/ngraph-sample.cpp
@@ -0,0 +1,64 @@
+#include <iostream>
+
+#include <ngraph/ngraph.hpp>
+
+using namespace ngraph;
+
+int main()
+{
+    // Build the graph
+    Shape s{2, 3};
+    auto a = std::make_shared<op::Parameter>(element::f32, s);
+    auto b = std::make_shared<op::Parameter>(element::f32, s);
+    auto c = std::make_shared<op::Parameter>(element::f32, s);
+
+    auto t0 = std::make_shared<op::Add>(a, b);
+    auto t1 = std::make_shared<op::Multiply>(t0, c);
+
+    // Make the function
+    // OutputVector not defined in ngraph-v22
+    // Used NodeVector instead of OutputVector
+    auto f = std::make_shared<Function>(NodeVector{t1},
+                                        ParameterVector{a, b, c});
+
+    // // Create the backend
+    // auto backend = runtime::Backend::create("CPU");
+
+    // // Allocate tensors for arguments a, b, c
+    // auto t_a = backend->create_tensor(element::f32, s);
+    // auto t_b = backend->create_tensor(element::f32, s);
+    // auto t_c = backend->create_tensor(element::f32, s);
+    // // Allocate tensor for the result
+    // auto t_result = backend->create_tensor(element::f32, s);
+
+    // // Initialize tensors
+    // float v_a[2][3] = {{1, 2, 3}, {4, 5, 6}};
+    // float v_b[2][3] = {{7, 8, 9}, {10, 11, 12}};
+    // float v_c[2][3] = {{1, 0, -1}, {-1, 1, 2}};
+
+    // t_a->write(&v_a, sizeof(v_a));
+    // t_b->write(&v_b, sizeof(v_b));
+    // t_c->write(&v_c, sizeof(v_c));
+
+    // // Invoke the function
+    // auto exec = backend->compile(f);
+    // exec->call({t_result}, {t_a, t_b, t_c});
+
+    // // Get the result
+    // float r[2][3];
+    // t_result->read(&r, sizeof(r));
+
+    // std::cout << "[" << std::endl;
+    // for (size_t i = 0; i < s[0]; ++i)
+    // {
+    //     std::cout << " [";
+    //     for (size_t j = 0; j < s[1]; ++j)
+    //     {
+    //         std::cout << r[i][j] << ' ';
+    //     }
+    //     std::cout << ']' << std::endl;
+    // }
+    // std::cout << ']' << std::endl;
+
+    return 0;
+}
\ No newline at end of file
diff --git a/lib/oeqa/runtime/miutils/tests/ngraph_test.py b/lib/oeqa/runtime/miutils/tests/ngraph_test.py
new file mode 100644
index 0000000..a26660e
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/tests/ngraph_test.py
@@ -0,0 +1,30 @@
+import os
+script_path = os.path.dirname(os.path.realpath(__file__))
+files_path = os.path.join(script_path, '../../files/')
+
+class NgraphTest(object):
+    ngraph_src_dir = '/tmp/'
+    ngraph_src_filename = 'ngraph-sample.cpp'
+    ngraph_target_filename = 'ngraph-sample'
+
+    def __init__(self, target):
+        self.target = target
+
+    def setup(self):
+        self.target.copy_to(os.path.join(files_path, 'ngraph', self.ngraph_src_filename), self.ngraph_src_dir)
+
+    def tear_down(self):
+        files = '%s%s %s%s' % (self.ngraph_src_dir,
+                               self.ngraph_src_filename,
+                               self.ngraph_src_dir,
+                               self.ngraph_target_filename)
+        self.target.run('rm %s' % files)
+
+    def test_ngraph_can_compile(self):
+        return self.target.run('gcc %s%s -o %s%s -lngraph -lstdc++' % (self.ngraph_src_dir,
+                                                                       self.ngraph_src_filename,
+                                                                       self.ngraph_src_dir,
+                                                                       self.ngraph_target_filename))
+
+    def test_ngraph_can_compute_network_for_inference_engine(self):
+        return self.target.run('cd /tmp; ./%s' % self.ngraph_target_filename)
-- 
2.7.4

-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.

View/Reply Online (#6433): 
https://lists.yoctoproject.org/g/meta-intel/message/6433
Mute This Topic: https://lists.yoctoproject.org/mt/72095573/21656
Group Owner: [email protected]
Unsubscribe: https://lists.yoctoproject.org/g/meta-intel/unsub  
[[email protected]]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to