bgawrych commented on a change in pull request #20227:
URL: https://github.com/apache/incubator-mxnet/pull/20227#discussion_r639705039
##########
File path: tests/python/quantization/test_quantization.py
##########
@@ -314,42 +341,46 @@ def check_quantized_elemwise_add(data_shape, qtype):
print('skipped testing quantized_elemwise_add for gpu since it is not supported yet')
return
- dataA = mx.sym.Variable(name='dataA', shape=data_shape, dtype='float32')
- dataB = mx.sym.Variable(name='dataB', shape=data_shape, dtype='float32')
- elemwise_add_fp32 = mx.sym.elemwise_add(dataA, dataB)
- arg_names = elemwise_add_fp32.list_arguments()
- elemwise_add_fp32_exe = elemwise_add_fp32._simple_bind(ctx=mx.current_context(), grad_req='null')
+ class ElemwiseSumBlock(mx.gluon.nn.HybridBlock):
+ def __init__(self, **kwargs):
+ super(ElemwiseSumBlock, self).__init__(**kwargs)
+
+ def hybrid_forward(self, F, dataA, dataB):
+ return F.elemwise_add(dataA, dataB)
+
+ class QuantElemwiseSumBlock(mx.gluon.nn.HybridBlock):
+ def __init__(self, **kwargs):
+ super(QuantElemwiseSumBlock, self).__init__(**kwargs)
+
+ def hybrid_forward(self, F, dataA, dataB, dataA_min, dataA_max, dataB_min, dataB_max):
+ return F.contrib.quantized_elemwise_add(dataA, dataB, dataA_min, dataA_max, dataB_min, dataB_max)
+
+ elemwise_add_fp32 = ElemwiseSumBlock()
+
if qtype == 'uint8':
data_low = 0.0
data_high = 255.0
else:
data_low = -127.0
data_high = 127.0
- dataA_val = mx.nd.random.uniform(low=data_low, high=data_high, shape=data_shape).astype('int32')
- dataB_val = mx.nd.random.uniform(low=data_low, high=data_high, shape=data_shape).astype('int32')
- elemwise_add_fp32_exe.arg_dict[arg_names[0]][:] = dataA_val
-
- elemwise_add_fp32_exe.arg_dict[arg_names[1]][:] = dataB_val
-
- output = elemwise_add_fp32_exe.forward()[0]
- qdataA = mx.sym.Variable(name='qdataA', shape=data_shape, dtype=qtype)
- qdataB = mx.sym.Variable(name='qdataB', shape=data_shape, dtype=qtype)
- min_dataA = mx.sym.Variable(name='min_dataA', dtype='float32')
- max_dataA = mx.sym.Variable(name='max_dataA', dtype='float32')
- min_dataB = mx.sym.Variable(name='min_dataB', dtype='float32')
- max_dataB = mx.sym.Variable(name='max_dataB', dtype='float32')
- quantized_elemwise_add = mx.sym.contrib.quantized_elemwise_add(qdataA, qdataB, min_dataA, max_dataA, min_dataB, max_dataB)
- elemwise_add_int8_exe = quantized_elemwise_add._simple_bind(ctx=mx.current_context(), grad_req='null')
- qarg_names = quantized_elemwise_add.list_arguments()
- elemwise_add_int8_exe.arg_dict[qarg_names[0]][:] = elemwise_add_fp32_exe.arg_dict[arg_names[0]].astype(qtype)
- elemwise_add_int8_exe.arg_dict[qarg_names[1]][:] = elemwise_add_fp32_exe.arg_dict[arg_names[1]].astype(qtype)
+ dataA_val = mx.nd.random.uniform(low=data_low, high=data_high, shape=data_shape).astype('int32').astype('float32')
+ dataB_val = mx.nd.random.uniform(low=data_low, high=data_high, shape=data_shape).astype('int32').astype('float32')
+
+ output = elemwise_add_fp32(dataA_val, dataB_val)
+
+ #run quantized
+ quantized_elemwise_add = QuantElemwiseSumBlock()
+ dataA_val_int8 = dataA_val.astype(qtype)
+ dataB_val_int8 = dataB_val.astype(qtype)
quantized_range = 127.0
- elemwise_add_int8_exe.arg_dict[qarg_names[2]][:] = data_low
- elemwise_add_int8_exe.arg_dict[qarg_names[3]][:] = data_high
- elemwise_add_int8_exe.arg_dict[qarg_names[4]][:] = data_low
- elemwise_add_int8_exe.arg_dict[qarg_names[5]][:] = data_high
- qoutput, min_range, max_range = elemwise_add_int8_exe.forward()
+ min_dataA = mx.nd.array([data_low])
+ max_dataA = mx.nd.array([data_high])
+ min_dataB = mx.nd.array([data_low])
+ max_dataB = mx.nd.array([data_high])
+ qoutput, min_range, max_range = quantized_elemwise_add(dataA_val_int8, dataB_val_int8,
+                                                        min_dataA, max_dataA,
+                                                        min_dataB, max_dataB)
int8_rslt = qoutput.astype(output.dtype)*max_range/0x7fffffff
Review comment:
I added the spaces, but I left the equation as it was (`max_range` appears to be a float, so the division by `0x7fffffff` is float division as intended).
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]