This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit 8fdcb8541b6690ac2d81a1ebcadbcf31adf45910
Author: ThomasDelteil <thomas.delte...@gmail.com>
AuthorDate: Wed Apr 25 15:33:58 2018 -0700

    Update rnn_layer.py
---
 python/mxnet/gluon/rnn/rnn_layer.py | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/python/mxnet/gluon/rnn/rnn_layer.py b/python/mxnet/gluon/rnn/rnn_layer.py
index 21b41d9..59dd747 100644
--- a/python/mxnet/gluon/rnn/rnn_layer.py
+++ b/python/mxnet/gluon/rnn/rnn_layer.py
@@ -285,10 +285,10 @@ class RNN(_RNNLayer):
 
     Inputs:
         - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
-          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
-          Be aware that a `transpose` operation with a ndarray results in a new allocation of
-          memory. For optimal performance and when applicable, consider transposing
-          your layout to "TNC" before loading your data into a ndarray.
+          when `layout` is "TNC". For other layouts, dimensions are permuted accordingly
+          using the transpose() operator, which adds performance overhead. Consider
+          creating batches in TNC layout during the data batching step.
+
         - **states**: initial recurrent state tensor with shape
           `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
           shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
@@ -388,10 +388,9 @@ class LSTM(_RNNLayer):
 
     Inputs:
         - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
-          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
-          Be aware that a `transpose` operation with a ndarray results in a new allocation of
-          memory. For optimal performance and when applicable, consider transposing
-          your layout to "TNC" before loading your data into a ndarray.
+          when `layout` is "TNC". For other layouts, dimensions are permuted accordingly
+          using the transpose() operator, which adds performance overhead. Consider
+          creating batches in TNC layout during the data batching step.
         - **states**: a list of two initial recurrent state tensors. Each has shape
           `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
           shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
@@ -488,10 +487,9 @@ class GRU(_RNNLayer):
 
     Inputs:
         - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
-          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
-          Be aware that a `transpose` operation with a ndarray results in a new allocation of
-          memory. For optimal performance and when applicable, consider transposing
-          your layout to "TNC" before loading your data into a ndarray.
+          when `layout` is "TNC". For other layouts, dimensions are permuted accordingly
+          using the transpose() operator, which adds performance overhead. Consider
+          creating batches in TNC layout during the data batching step.
         - **states**: initial recurrent state tensor with shape
           `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
           shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
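
For reference, a minimal sketch of the pattern the updated docstrings recommend
(not part of the commit; the sizes and variable names below are hypothetical,
and it assumes a working MXNet/Gluon install):

    import mxnet as mx
    from mxnet import gluon

    # Hypothetical sizes, for illustration only.
    seq_len, batch_size, input_size, num_hidden = 10, 32, 8, 16

    lstm = gluon.rnn.LSTM(num_hidden, layout='TNC')
    lstm.initialize()

    # Recommended: build each batch in TNC layout up front; no extra copy is needed.
    data_tnc = mx.nd.random.uniform(shape=(seq_len, batch_size, input_size))
    out = lstm(data_tnc)

    # Also works, but mx.nd.transpose allocates a new NDArray for every batch,
    # which is the overhead the updated docstrings warn about.
    data_ntc = mx.nd.random.uniform(shape=(batch_size, seq_len, input_size))
    out = lstm(mx.nd.transpose(data_ntc, axes=(1, 0, 2)))

    # The `states` shapes described in the docstrings follow the same layout;
    # for a bidirectional LSTM the first axis doubles to 2*num_layers.
    bi_lstm = gluon.rnn.LSTM(num_hidden, num_layers=2, layout='TNC', bidirectional=True)
    bi_lstm.initialize()
    states = bi_lstm.begin_state(batch_size=batch_size)  # two tensors of shape (4, 32, 16)
    out, states = bi_lstm(data_tnc, states)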

-- 
To stop receiving notification emails like this one, please contact
j...@apache.org.
