Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package armnn for openSUSE:Factory checked 
in at 2021-05-21 21:50:22
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/armnn (Old)
 and      /work/SRC/openSUSE:Factory/.armnn.new.2988 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "armnn"

Fri May 21 21:50:22 2021 rev:16 rq:894803 version:21.05

Changes:
--------
--- /work/SRC/openSUSE:Factory/armnn/armnn.changes      2021-04-08 
21:32:59.171861174 +0200
+++ /work/SRC/openSUSE:Factory/.armnn.new.2988/armnn.changes    2021-05-21 
21:50:36.426086586 +0200
@@ -1,0 +2,23 @@
+Fri May 21 06:55:48 UTC 2021 - Guillaume GARDET <[email protected]>
+
+- Update to 21.05:
+  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.05
+  * Main changes: 
+    - new capabilities to allow users to attain higher performance by:
+      * Making the Arm NN Core thread safe opening the possibility of
+      running multiple inferences on the same model in parallel
+      software threads.
+      * Allowing graphs on the GPU backend to import their input and
+      output buffers either from correctly aligned main memory or
+      from kernel memory exposed as a dma_buf, thus reducing memory
+      usage and saving the time involved in copying data into and
+      out of the GPU memory space.
+    - In addition to this, support was added to allow the MobileBERT
+    network to be parsed and run.
+    - Finally three deprecated components: the Tensorflow Parser,
+    the Caffe Parser and the Arm NN Quantizer tool, were removed
+- Add patch to fix include path:
+  * armnn-fix-include.patch
+- Disable armnn-extratests as it fails to build with current version
+
+-------------------------------------------------------------------

Old:
----
  armnn-21.02.tar.gz

New:
----
  armnn-21.05.tar.gz
  armnn-fix-include.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ armnn.spec ++++++
--- /var/tmp/diff_new_pack.vjhpE0/_old  2021-05-21 21:50:37.026084101 +0200
+++ /var/tmp/diff_new_pack.vjhpE0/_new  2021-05-21 21:50:37.026084101 +0200
@@ -26,8 +26,6 @@
 %if "%{target}" != ""
 %define package_suffix -%{target}
 %endif
-# Use Tensorflow version 2
-%define tf_version_2 1
 # Compute library has neon enabled for aarch64 only
 %ifarch aarch64
 %bcond_without compute_neon
@@ -47,7 +45,8 @@
 %endif
 # Extra tests require opencv(3)-devel, but it is broken for Leap 15.1 - 
boo#1154091
 %if 0%{?suse_version} > 1500 || 0%{?sle_version} >= 150200
-%bcond_without armnn_extra_tests
+# FIXME: disabled for now, as it fails with version 21.05
+%bcond_with armnn_extra_tests
 %else
 %bcond_with armnn_extra_tests
 %endif
@@ -57,16 +56,6 @@
 %else
 %bcond_with armnn_flatbuffers
 %endif
-# Enable TensorFlow on TW and Leap 15.2/SLE15SP2 for aarch64 and x86_64 (TF 
fails to build on armv7)
-%if 0%{?suse_version} > 1500 || 0%{?sle_version} >= 150200
-%ifarch aarch64 x86_64
-%bcond_without armnn_tf
-%else
-%bcond_with armnn_tf
-%endif # ifarch
-%else  # suse_version
-%bcond_with armnn_tf
-%endif # suse_version
 # ONNX is available on Leap 15.2+/SLE15SP2+, but there is a compatibility issue
 # with ONNX 1.7.0 in Tumbleweed - 
https://github.com/ARM-software/armnn/issues/419
 %if 0%{?sle_version} >= 150200
@@ -75,12 +64,11 @@
 %bcond_with armnn_onnx
 %endif
 %define version_major 21
-%define version_minor 02
-%define version_lib 24
-# Do not package ArmnnConverter and ArmnnQuantizer, by default
+%define version_minor 05
+%define version_lib 25
+%define version_lib_tfliteparser 24
+# Do not package ArmnnConverter, by default
 %bcond_with armnn_tools
-# Enable CAFFE
-%bcond_without armnn_caffe
 Name:           armnn%{?package_suffix}
 Version:        %{version_major}.%{version_minor}
 Release:        0
@@ -92,6 +80,8 @@
 Source1:        armnn-rpmlintrc
 # PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/499
 Patch1:         96beb97.diff
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/545
+Patch2:         armnn-fix-include.patch
 # PATCHES to add downstream ArmnnExamples binary - 
https://layers.openembedded.org/layerindex/recipe/87610/
 Patch200:       0003-add-more-test-command-line-arguments.patch
 Patch201:       0005-add-armnn-mobilenet-test-example.patch
@@ -121,22 +111,16 @@
 BuildRequires:  libboost_thread-devel >= 1.59
 %endif
 %endif
-%if %{with armnn_caffe}
-BuildRequires:  caffe-devel
-%endif
 %if %{with armnn_flatbuffers}
 BuildRequires:  flatbuffers-devel
-%if %{tf_version_2}
 BuildRequires:  tensorflow2-lite-devel
-%else
-BuildRequires:  tensorflow-lite-devel
-%endif
 %endif
 %if %{with compute_cl}
 # Mesa-libOpenCl is required for tests
 BuildRequires:  Mesa-libOpenCL
 BuildRequires:  ocl-icd-devel
 BuildRequires:  opencl-cpp-headers
+BuildRequires:  opencl-headers
 %endif
 %if %{with armnn_extra_tests}
 %if 0%{?suse_version} > 1500
@@ -151,13 +135,6 @@
 %if %{with armnn_tests}
 BuildRequires:  stb-devel
 %endif
-%if %{with armnn_tf}
-%if %{tf_version_2}
-BuildRequires:  tensorflow2-devel
-%else
-BuildRequires:  tensorflow-devel
-%endif
-%endif
 %if %{with PyArmnn}
 BuildRequires:  python3-devel
 BuildRequires:  python3-wheel
@@ -168,17 +145,11 @@
 %endif
 %if %{with armnn_flatbuffers}
 Requires:       libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
-Requires:       libarmnnTfLiteParser%{version_lib}%{?package_suffix} = 
%{version}
-%endif
-%if %{with armnn_caffe}
-Requires:       libarmnnCaffeParser%{version_lib}%{?package_suffix} = 
%{version}
+Requires:       
libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
 %endif
 %if %{with armnn_onnx}
 Requires:       libarmnnOnnxParser%{version_lib}%{?package_suffix} = %{version}
 %endif
-%if %{with armnn_tf}
-Requires:       libarmnnTfParser%{version_lib}%{?package_suffix} = %{version}
-%endif
 # Make sure we do not install both openCL and non-openCL (CPU only) versions.
 %if "%{target}" == "opencl"
 Conflicts:      armnn
@@ -191,7 +162,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 %package devel
@@ -210,23 +181,17 @@
 %endif
 %if %{with armnn_flatbuffers}
 Requires:       libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
-Requires:       libarmnnTfLiteParser%{version_lib}%{?package_suffix} = 
%{version}
-%endif
-%if %{with armnn_caffe}
-Requires:       libarmnnCaffeParser%{version_lib}%{?package_suffix} = 
%{version}
+Requires:       
libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
 %endif
 %if %{with armnn_onnx}
 Requires:       libarmnnOnnxParser%{version_lib}%{?package_suffix} = %{version}
 %endif
-%if %{with armnn_tf}
-Requires:       libarmnnTfParser%{version_lib}%{?package_suffix} = %{version}
-%endif
 
 %description devel
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the development libraries and headers for armnn.
@@ -248,7 +213,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains additional downstream tests for armnn.
@@ -267,7 +232,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnn library from armnn.
@@ -285,7 +250,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnBasePipeServer library from armnn.
@@ -303,7 +268,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libtimelineDecoder library from armnn.
@@ -321,7 +286,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libtimelineDecoder library from armnn.
@@ -340,70 +305,30 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnSerializer library from armnn.
 
-%package -n libarmnnTfLiteParser%{version_lib}%{?package_suffix}
+%package -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
 Summary:        libarmnnTfLiteParser from armnn
 Group:          Development/Libraries/C and C++
 %if "%{target}" == "opencl"
-Conflicts:      libarmnnTfLiteParser%{version_lib}
+Conflicts:      libarmnnTfLiteParser%{version_lib_tfliteparser}
 %else
-Conflicts:      libarmnnTfLiteParser%{version_lib}-opencl
+Conflicts:      libarmnnTfLiteParser%{version_lib_tfliteparser}-opencl
 %endif
 
-%description -n libarmnnTfLiteParser%{version_lib}%{?package_suffix}
+%description -n 
libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnTfLiteParser library from armnn.
 %endif
 
-%if %{with armnn_tf}
-%package -n libarmnnTfParser%{version_lib}%{?package_suffix}
-Summary:        libarmnnTfParser from armnn
-Group:          Development/Libraries/C and C++
-%if "%{target}" == "opencl"
-Conflicts:      libarmnnTfParser%{version_lib}
-%else
-Conflicts:      libarmnnTfParser%{version_lib}-opencl
-%endif
-
-%description -n libarmnnTfParser%{version_lib}%{?package_suffix}
-Arm NN is an inference engine for CPUs, GPUs and NPUs.
-It bridges the gap between existing NN frameworks and the underlying IP.
-It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
-modification – across Arm Cortex CPUs and Arm Mali GPUs.
-
-This package contains the libarmnnTfParser library from armnn.
-%endif
-
-%if %{with armnn_caffe}
-%package -n libarmnnCaffeParser%{version_lib}%{?package_suffix}
-Summary:        libarmnnCaffeParser from armnn
-Group:          Development/Libraries/C and C++
-%if "%{target}" == "opencl"
-Conflicts:      libarmnnCaffeParser%{version_lib}
-%else
-Conflicts:      libarmnnCaffeParser%{version_lib}-opencl
-%endif
-
-%description -n libarmnnCaffeParser%{version_lib}%{?package_suffix}
-Arm NN is an inference engine for CPUs, GPUs and NPUs.
-It bridges the gap between existing NN frameworks and the underlying IP.
-It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
-modification – across Arm Cortex CPUs and Arm Mali GPUs.
-
-This package contains the libarmnnCaffeParser library from armnn.
-%endif
-
 %if %{with armnn_onnx}
 %package -n libarmnnOnnxParser%{version_lib}%{?package_suffix}
 Summary:        libarmnnOnnxParser from armnn
@@ -418,7 +343,7 @@
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
-such as TensorFlow and Caffe, allowing them to run efficiently – without
+such as TensorFlow Lite, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnOnnxParser library from armnn.
@@ -426,7 +351,13 @@
 
 %prep
 %setup -q -n armnn-%{version}
+%if %{with armnn_flatbuffers}
+%if %{pkg_vcmp tensorflow2-lite-devel >= 2.4}
+# This patch breaks build on TF < 2.4
 %patch1 -p1
+%endif
+%endif
+%patch2 -p1
 %if %{with armnn_extra_tests}
 %patch200 -p1
 %patch201 -p1
@@ -447,27 +378,11 @@
 %if 0%{?suse_version} > 1500
 export CXX_ADDITIONAL_FLAGS="$CXX_ADDITIONAL_FLAGS -Wno-error=deprecated-copy 
-Wno-error=deprecated-declarations"
 %endif
-%if 0%{?sle_version} == 150200
-%if %{with armnn_tf}
-%if %{tf_version_2}
-# TensorFlow2 in Leap 15.2 shows erros on major/minor due to '-Werror' option:
-#   
/usr/lib/python3.6/site-packages/tensorflow_core/include/tensorflow/core/protobuf/autotuning.pb.cc:930:13:
 error: In the GNU C Library, "major" is defined by <sys/sysmacros.h>.
-#   For historical compatibility, it is currently defined by <sys/types.h> as 
well, but we plan to remove this soon. To use "major", include 
<sys/sysmacros.h> directly.
-sed -i 's/-Werror//' ./cmake/GlobalConfig.cmake
-%endif
-%endif
-%endif
 %cmake \
   -DCMAKE_SKIP_RPATH=True \
   -DSHARED_BOOST=1 \
   -DCMAKE_CXX_FLAGS:STRING="%{optflags} -pthread $CXX_ADDITIONAL_FLAGS 
-Wno-error=implicit-fallthrough -Wno-error=unused-parameter" \
   -DBOOST_LIBRARYDIR=%{_libdir} \
-%if %{with armnn_caffe}
-  -DBUILD_CAFFE_PARSER=ON \
-%else
-  -DBUILD_CAFFE_PARSER=OFF \
-%endif
-  -DCAFFE_GENERATED_SOURCES=%{_includedir}/ \
 %if %{with armnn_onnx}
   -DBUILD_ONNX_PARSER=ON \
   -DONNX_GENERATED_SOURCES=../onnx_deps/ \
@@ -478,24 +393,12 @@
   -DBUILD_ARMNN_SERIALIZER=ON \
   -DFLATC_DIR=%{_bindir} \
   -DFLATBUFFERS_INCLUDE_PATH=%{_includedir} \
-  -DBUILD_ARMNN_QUANTIZER=ON \
   -DBUILD_TF_LITE_PARSER=ON \
   -DTF_LITE_SCHEMA_INCLUDE_PATH=%{_includedir}/tensorflow/lite/schema/ \
 %else
   -DBUILD_ARMNN_SERIALIZER=OFF \
-  -DBUILD_ARMNN_QUANTIZER=OFF \
   -DBUILD_TF_LITE_PARSER=OFF \
 %endif
-%if %{with armnn_tf}
-  -DBUILD_TF_PARSER=ON \
-%if %{tf_version_2}
-  -DTF_GENERATED_SOURCES=%{python3_sitelib}/tensorflow_core/include/ \
-%else
-  -DTF_GENERATED_SOURCES=%{python3_sitelib}/tensorflow/include/ \
-%endif
-%else
-  -DBUILD_TF_PARSER=OFF \
-%endif
 %if %{with compute_neon} || %{with compute_cl}
   -DARMCOMPUTE_INCLUDE=%{_includedir} \
   -DHALF_INCLUDE=%{_includedir}/half \
@@ -568,8 +471,6 @@
 %if %{with armnn_tools}
 # Install ArmNNConverter
 cp $CP_ARGS ./build/ArmnnConverter %{buildroot}%{_bindir}
-# Install ArmNNQuantizer
-cp $CP_ARGS ./build/ArmnnQuantizer %{buildroot}%{_bindir}
 %endif
 %endif
 # Drop static libs - https://github.com/ARM-software/armnn/issues/514
@@ -579,10 +480,6 @@
 %if %{without compute_cl} && %{with armnn_tests}
 %check
 # Run tests
-%if !%{tf_version_2}
-# Skip some TF Lite tests because TensorFlow < 1.14 is used and make some 
tests failing
-export UnitTestFlags="-t !TensorflowLiteParser/SliceSingleDim -t 
!TensorflowLiteParser/SliceD123 -t !TensorflowLiteParser/SliceD213 -t 
!TensorflowLiteParser/TransposeWithPermuteData -t 
!TensorflowLiteParser/TransposeWithoutPermuteDims"
-%endif
 LD_LIBRARY_PATH="$(pwd)/build/" \
 ./build/UnitTests $UnitTestFlags
 %endif
@@ -603,18 +500,8 @@
 %post -n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
 %postun -n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
 
-%post -n libarmnnTfLiteParser%{version_lib}%{?package_suffix} -p /sbin/ldconfig
-%postun -n libarmnnTfLiteParser%{version_lib}%{?package_suffix} -p 
/sbin/ldconfig
-%endif
-
-%if %{with armnn_tf}
-%post -n libarmnnTfParser%{version_lib}%{?package_suffix} -p /sbin/ldconfig
-%postun -n libarmnnTfParser%{version_lib}%{?package_suffix} -p /sbin/ldconfig
-%endif
-
-%if %{with armnn_caffe}
-%post -n libarmnnCaffeParser%{version_lib}%{?package_suffix} -p /sbin/ldconfig
-%postun -n libarmnnCaffeParser%{version_lib}%{?package_suffix} -p 
/sbin/ldconfig
+%post -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} -p 
/sbin/ldconfig
+%postun -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} 
-p /sbin/ldconfig
 %endif
 
 %if %{with armnn_onnx}
@@ -628,24 +515,15 @@
 %license LICENSE
 %if %{with armnn_tests}
 %{_bindir}/ExecuteNetwork
-%if %{with armnn_caffe}
-%{_bindir}/Caffe*-Armnn
-%{_bindir}/MultipleNetworksCifar10
-%endif
 %if %{with armnn_flatbuffers}
 %if %{with armnn_tools}
 %{_bindir}/ArmnnConverter
-%{_bindir}/ArmnnQuantizer
 %endif
 %{_bindir}/TfLite*-Armnn
-%{_bindir}/Image*Generator
 %endif
 %if %{with armnn_onnx}
 %{_bindir}/Onnx*-Armnn
 %endif
-%if %{with armnn_tf}
-%{_bindir}/Tf*-Armnn
-%endif
 %if %{with armnn_flatbuffers}
 %{_bindir}/SimpleSample
 %endif
@@ -672,20 +550,10 @@
 %files -n libarmnnSerializer%{version_lib}%{?package_suffix}
 %{_libdir}/libarmnnSerializer.so.*
 
-%files -n libarmnnTfLiteParser%{version_lib}%{?package_suffix}
+%files -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
 %{_libdir}/libarmnnTfLiteParser.so.*
 %endif
 
-%if %{with armnn_tf}
-%files -n libarmnnTfParser%{version_lib}%{?package_suffix}
-%{_libdir}/libarmnnTfParser.so.*
-%endif
-
-%if %{with armnn_caffe}
-%files -n libarmnnCaffeParser%{version_lib}%{?package_suffix}
-%{_libdir}/libarmnnCaffeParser.so.*
-%endif
-
 %if %{with armnn_onnx}
 %files -n libarmnnOnnxParser%{version_lib}%{?package_suffix}
 %{_libdir}/libarmnnOnnxParser.so.*
@@ -706,18 +574,12 @@
 %{_includedir}/armnn/utility/*.hpp
 %dir %{_includedir}/armnnUtils
 %{_includedir}/armnnUtils/*.hpp
-%dir %{_includedir}/armnnCaffeParser/
-%{_includedir}/armnnCaffeParser/*.hpp
 %dir %{_includedir}/armnnOnnxParser/
 %{_includedir}/armnnOnnxParser/*.hpp
 %dir %{_includedir}/armnnTfLiteParser/
 %{_includedir}/armnnTfLiteParser/*.hpp
-%dir %{_includedir}/armnnTfParser/
-%{_includedir}/armnnTfParser/*.hpp
 %dir %{_includedir}/armnnDeserializer/
 %{_includedir}/armnnDeserializer/IDeserializer.hpp
-%dir %{_includedir}/armnnQuantizer
-%{_includedir}/armnnQuantizer/INetworkQuantizer.hpp
 %dir %{_includedir}/armnnSerializer/
 %{_includedir}/armnnSerializer/ISerializer.hpp
 %dir %{_libdir}/cmake/armnn
@@ -730,12 +592,6 @@
 %{_libdir}/libarmnnSerializer.so
 %{_libdir}/libarmnnTfLiteParser.so
 %endif
-%if %{with armnn_tf}
-%{_libdir}/libarmnnTfParser.so
-%endif
-%if %{with armnn_caffe}
-%{_libdir}/libarmnnCaffeParser.so
-%endif
 %if %{with armnn_onnx}
 %{_libdir}/libarmnnOnnxParser.so
 %endif

++++++ armnn-21.02.tar.gz -> armnn-21.05.tar.gz ++++++
++++ 87190 lines of diff (skipped)

++++++ armnn-fix-include.patch ++++++
--- armnn-21.05/src/backends/cl/ClImportTensorHandle.hpp.orig   2021-05-21 
11:52:42.847054405 +0000
+++ armnn-21.05/src/backends/cl/ClImportTensorHandle.hpp        2021-05-21 
11:52:52.676788673 +0000
@@ -19,7 +19,7 @@
 #include <arm_compute/core/TensorShape.h>
 #include <arm_compute/core/Coordinates.h>
 
-#include <include/CL/cl_ext.h>
+#include <CL/cl_ext.h>
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 
 namespace armnn

Reply via email to