pengzhao-intel commented on a change in pull request #14713: [WIP] MKLDNN RNN Inference Integration (fp32 LSTM and vRNN with tanh and relu)
URL: https://github.com/apache/incubator-mxnet/pull/14713#discussion_r285423753
##########
File path: src/operator/rnn-inl.h
##########
@@ -829,22 +850,23 @@ class RNNOp {
#endif
if (ctx_.dev_type == kCPU) {
- // allocate temp space
- const size_t work_cpu_space_size =
- GetRNNWorkspaceSize(param_.seq_length_, param_.batch_size_,
- param_.state_size, direction, param_.mode);
- if (temp_init_space_ && temp_cpu_space_size_ < work_cpu_space_size) {
- Storage::Get()->Free(temp_cpu_space_);
- temp_init_space_ = false;
- }
- if (!temp_init_space_) {
- temp_cpu_space_ = Storage::Get()->Alloc
- (work_cpu_space_size * sizeof(DType), Context::CPU());
- temp_cpu_space_size_ = work_cpu_space_size;
- temp_init_space_ = true;
- }
- DType* work_cpu_space = static_cast<DType*>(temp_cpu_space_.dptr);
if (ctx.is_train) {
+ // allocate temp space
+ const size_t work_cpu_space_size =
+ GetRNNWorkspaceSize(param_.seq_length_, param_.batch_size_,
+ param_.state_size, direction, param_.mode);
+ if (temp_init_space_ && temp_cpu_space_size_ < work_cpu_space_size) {
+ Storage::Get()->Free(temp_cpu_space_);
+ temp_init_space_ = false;
+ }
+ if (!temp_init_space_) {
+ temp_cpu_space_ = Storage::Get()->Alloc
+ (work_cpu_space_size * sizeof(DType), Context::CPU());
+ temp_cpu_space_size_ = work_cpu_space_size;
+ temp_init_space_ = true;
+ }
+ DType* work_cpu_space = static_cast<DType*>(temp_cpu_space_.dptr);
+
const size_t r_size = GetRNNReserveSpaceSize(param_.num_layers,
direction,
Review comment:
Why was this block moved from L832 down into the `ctx.is_train` branch here?
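For context, the block in question implements a grow-only cached workspace: the temp buffer is freed and reallocated only when the requested size exceeds the cached capacity, and is reused otherwise. Below is a minimal standalone sketch of that pattern, not MXNet code; the class and method names (`CachedWorkspace`, `Acquire`) are hypothetical, and plain `malloc`/`free` stands in for MXNet's `Storage` manager:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Grow-only cached workspace: reallocate only when the request
// exceeds the cached capacity, otherwise hand back the same buffer.
class CachedWorkspace {
 public:
  ~CachedWorkspace() { std::free(ptr_); }

  // Return a buffer of at least `bytes` bytes.
  void* Acquire(size_t bytes) {
    if (initialized_ && capacity_ < bytes) {
      std::free(ptr_);            // cached buffer is too small: drop it
      initialized_ = false;
    }
    if (!initialized_) {
      ptr_ = std::malloc(bytes);  // allocate a fresh, larger buffer
      capacity_ = bytes;
      initialized_ = true;
    }
    return ptr_;                  // reused across calls when big enough
  }

 private:
  void* ptr_ = nullptr;
  size_t capacity_ = 0;
  bool initialized_ = false;
};

int main() {
  CachedWorkspace ws;
  void* a = ws.Acquire(1024);     // first call allocates
  void* b = ws.Acquire(512);      // smaller request reuses the buffer
  std::printf("reused: %s\n", a == b ? "yes" : "no");
  ws.Acquire(4096);               // larger request triggers reallocation
  return 0;
}
```

With the change in this diff, that allocation runs only under `ctx.is_train`, so the inference path presumably no longer pays for this workspace (e.g. because the MKL-DNN primitives manage their own memory), which appears to be what the question above is probing.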