anijain2305 commented on a change in pull request #5848:
URL: https://github.com/apache/incubator-tvm/pull/5848#discussion_r445827227
########## File path: python/tvm/relay/frontend/tflite.py ##########

@@ -2566,17 +2613,27 @@ def convert_quantize(self, op):
         input_tensors = self.get_input_tensors(op)
         assert len(input_tensors) == 1, "input tensors length should be 1"
         input_tensor = input_tensors[0]
+        input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
         in_expr = self.get_expr(input_tensor.tensor_idx)

         output_tensors = self.get_output_tensors(op)
         assert len(output_tensors) == 1, "output tensors length should be 1"
         output_tensor = output_tensors[0]
+        output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())

         # The output must be quantized
         assert output_tensor.qnn_params

-        # Quantize the input
-        out = self.quantize(in_expr, output_tensor)
+        # TFLite Quantize op can also act as Requantize op
+        if input_tensor_type_str == "float32":
+            out = self.quantize(in_expr, output_tensor)
+        else:
+            out = _qnn.op.requantize(in_expr,
+                                     input_scale=input_tensor.qnn_params['scale'],
+                                     input_zero_point=input_tensor.qnn_params['zero_point'],
+                                     output_scale=output_tensor.qnn_params['scale'],
+                                     output_zero_point=output_tensor.qnn_params['zero_point'],
+                                     out_dtype=output_tensor_type_str)

         return out

Review comment:
   ![image](https://user-images.githubusercontent.com/13822661/85793474-ebec7980-b6e9-11ea-8e53-fa3121ba50c0.png)

   The test case above was added to force both types of Quantize nodes (plain quantize and requantize).
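   For anyone following along, here is a minimal sketch (not the PR's code) of what the two branches of `convert_quantize` produce at the Relay level; the shapes, scales, and zero points are made-up values for illustration, and in the frontend the scale/zero-point constants come from `qnn_params`:

   ```python
   # Sketch of the two conversion paths with hypothetical quantization params.
   from tvm import relay

   # float32 input -> qnn.quantize (the pre-existing behavior)
   fp32_in = relay.var("fp32_in", shape=(1, 8), dtype="float32")
   quantize_out = relay.qnn.op.quantize(
       fp32_in,
       output_scale=relay.const(0.25, "float32"),
       output_zero_point=relay.const(128, "int32"),
       out_dtype="uint8")

   # already-quantized input -> qnn.requantize (the new branch)
   uint8_in = relay.var("uint8_in", shape=(1, 8), dtype="uint8")
   requantize_out = relay.qnn.op.requantize(
       uint8_in,
       input_scale=relay.const(0.25, "float32"),
       input_zero_point=relay.const(128, "int32"),
       output_scale=relay.const(0.5, "float32"),
       output_zero_point=relay.const(127, "int32"),
       out_dtype="uint8")

   print(relay.Function([fp32_in], quantize_out))
   print(relay.Function([uint8_in], requantize_out))
   ```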
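   The test itself is only visible in the screenshot above, so as a purely hypothetical sketch: TensorFlow's post-training full-integer quantization inserts a float32 -> int8 Quantize at the model input, and the converter may also insert int8 -> int8 Quantize (requantize) ops where adjacent ops end up with different quantization parameters, so a model like the following could exercise both paths:

   ```python
   # Hypothetical sketch, not the PR's actual test.
   import numpy as np
   import tensorflow as tf

   def representative_dataset():
       # Calibration data so the converter can pick quantization params.
       for _ in range(8):
           yield [np.random.uniform(-1, 1, size=(1, 4)).astype("float32")]

   model = tf.keras.Sequential([
       tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
       tf.keras.layers.Dense(2),
   ])

   converter = tf.lite.TFLiteConverter.from_keras_model(model)
   converter.optimizations = [tf.lite.Optimize.DEFAULT]
   converter.representative_dataset = representative_dataset
   tflite_model = converter.convert()

   # The resulting flatbuffer can then be fed to TVM's TFLite frontend
   # (e.g. via relay.frontend.from_tflite) to hit convert_quantize.
   with open("quantized.tflite", "wb") as f:
       f.write(tflite_model)
   ```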