ZhennanQin commented on a change in pull request #13697: [MKLDNN] Enable signed
int8 support for convolution.
URL: https://github.com/apache/incubator-mxnet/pull/13697#discussion_r244657039
##########
File path: src/operator/nn/mkldnn/mkldnn_convolution-inl.h
##########
@@ -85,23 +85,28 @@ static inline bool IsOutputUInt8(const MKLDNNConvParam
&mkldnn_param) {
mkldnn_param.with_postsum_relu;
}
-mkldnn::convolution_forward::primitive_desc
-GetConvFwdImpl(const MKLDNNConvFullParam ¶m, const bool is_train,
- const NDArray &data, const NDArray &weights, const NDArray
*bias,
- const NDArray &output);
+mkldnn::convolution_forward::primitive_desc GetConvFwdImpl(const
MKLDNNConvFullParam ¶m,
+ const bool is_train,
+ const NDArray &data,
+ const NDArray
&weights,
+ const NDArray *bias,
+ const NDArray
&output);
class MKLDNNConvForward {
public:
mkldnn::convolution_forward::primitive_desc fwd_pd;
- MKLDNNConvForward(const MKLDNNConvFullParam ¶m, const bool is_train,
- const NDArray &data, const NDArray &weights,
- const NDArray *bias, const NDArray &output)
- : fwd_pd(GetConvFwdImpl(param, is_train, data, weights, bias, output)) {}
+ MKLDNNConvForward(const MKLDNNConvFullParam ¶m, const bool is_train,
const NDArray &data,
+ const NDArray &weights, const NDArray *bias, const NDArray
&output);
void SetNewMem(const mkldnn::memory &data, const mkldnn::memory &weight,
const mkldnn::memory *bias, const mkldnn::memory &output);
+ void SetNewMem(const mkldnn::memory &data, const mkldnn::memory &output) {
Review comment:
in mkldnn_conv.cc. The initialization stage will set the weight and bias
properly. And if the weight and bias don't change, then we will only set the data
and output from the 2nd iteration onward.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services