This is an automated email from the ASF dual-hosted git repository.

ptrendx pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new d509b79  Fix TRT INT8 unsupported hardware error handling (#19349)
d509b79 is described below

commit d509b79990152f3a33e700ebd4a15ad8353de2c9
Author: Serge Panev <[email protected]>
AuthorDate: Mon Oct 19 16:28:49 2020 -0700

    Fix TRT INT8 unsupported hardware error handling (#19349)
    
    Signed-off-by: Serge Panev <[email protected]>
---
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc         | 2 ++
 src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.cc | 4 ++++
 src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.h  | 2 ++
 tests/python/tensorrt/test_tensorrt.py                     | 6 ------
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc b/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
index fc4809d..96e0544 100644
--- a/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
+++ b/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
@@ -143,6 +143,7 @@ std::tuple<unique_ptr<nvinfer1::ICudaEngine>,
       builder_config->setInt8Calibrator(calibrator);
     } else {
       LOG(WARNING) << "TensorRT can't use int8 on this platform";
+      calibrator->setDone();
       calibrator = nullptr;
     }
   }
@@ -177,6 +178,7 @@ std::tuple<unique_ptr<nvinfer1::ICudaEngine>,
       trt_builder->setInt8Calibrator(calibrator);
     } else {
       LOG(WARNING) << "TensorRT can't use int8 on this platform";
+      calibrator->setDone();
       calibrator = nullptr;
     }
   }
diff --git a/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.cc b/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.cc
index 8ba7a3a..d5ee350 100644
--- a/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.cc
+++ b/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.cc
@@ -118,6 +118,10 @@ void TRTInt8Calibrator::writeCalibrationCache(const void* ptr,
           << " length=" << length;
 }
 
+void TRTInt8Calibrator::setDone() {
+  done_ = true;
+}
+
 void TRTInt8Calibrator::waitAndSetDone() {
   std::unique_lock<std::mutex> lk(mutex_);
   cv_.wait(lk, [&]{ return (!batch_is_set_ && !calib_running_) || done_; });
diff --git a/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.h b/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.h
index e6a5efb..bb81c9e 100644
--- a/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.h
+++ b/src/operator/subgraph/tensorrt/tensorrt_int8_calibrator.h
@@ -75,6 +75,8 @@ struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator2 {
   // TODO(spanev): determine if we need to serialize it
   const std::string& getCalibrationTableAsString() { return calibration_table_; }
 
+  void setDone();
+
   void waitAndSetDone();
 
   bool isCacheEmpty();
diff --git a/tests/python/tensorrt/test_tensorrt.py b/tests/python/tensorrt/test_tensorrt.py
index 20b84d0..1aecf94 100644
--- a/tests/python/tensorrt/test_tensorrt.py
+++ b/tests/python/tensorrt/test_tensorrt.py
@@ -143,12 +143,6 @@ def get_top1(logits):
 
 def test_tensorrt_symbol_int8():
     ctx = mx.gpu(0)
-    cuda_arch = get_cuda_compute_capability(ctx)
-    cuda_arch_min = 70
-    if cuda_arch < cuda_arch_min:
-        print('Bypassing test_tensorrt_symbol_int8 on cuda arch {}, need arch >= {}).'.format(
-              cuda_arch, cuda_arch_min))
-        return
 
     # INT8 engine output are not lossless, so we don't expect numerical uniformity,
     # but we have to compare the TOP1 metric

Reply via email to