anko-intel commented on code in PR #21043:
URL: https://github.com/apache/incubator-mxnet/pull/21043#discussion_r905001518
##########
src/operator/subgraph/dnnl/dnnl_fc.cc:
##########
@@ -152,6 +153,26 @@ void SgDNNLFCOp::Forward(const OpContext& ctx,
dnnl::reorder(*in_dnnl_mem, *tmp_mem),
{{DNNL_ARG_FROM, *in_dnnl_mem}, {DNNL_ARG_TO, *tmp_mem}});
output = NDArray(tmp_mem);
+ } else if (in_data[idx.sum].dtype() == mshadow::kUint8 &&
+ out_data[out_index].dtype() == mshadow::kInt8) {
+ auto sum_mem_desc = in_dnnl_mem->get_desc();
+ auto out_dtype = get_dnnl_type(mshadow::kInt8);
+ sum_mem_desc.data.data_type = static_cast<dnnl_data_type_t>(out_dtype);
+ dnnl_mem_ptr tmp_mem(new dnnl::memory(
+ sum_mem_desc, CpuEngine::Get()->get_engine(),
+ out_dnnl_mem->get_data_handle()));
+ DNNLStream::Get()->RegisterMem(tmp_mem);
+ const float u8_reorder_scale = 0.5;
Review Comment:
You could define 0.5 as a named constant or macro in this file and use it in both places, to make it clearer for the reader.
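
A minimal sketch of the suggested change (the constant name, comment, and anonymous namespace are illustrative assumptions, not part of the PR):

    namespace {
    // u8 covers [0, 255] while s8 covers [-128, 127], so scaling by 0.5 keeps
    // the reinterpreted sum input representable after the reorder.
    constexpr float kU8ToS8ReorderScale = 0.5f;
    }  // namespace

The reorder scale here and the other place in this file that uses 0.5 could then both reference kU8ToS8ReorderScale instead of repeating the literal.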