masahi edited a comment on issue #5243: [Frontend][TensorFlow] Improve TensorFlow Static Shape Tensor Array
URL: https://github.com/apache/incubator-tvm/pull/5243#issuecomment-609602675

Update: with the new static tensor array, I got the following PyTorch LSTM model, originally from the fastrnns benchmark in the PyTorch repo (https://github.com/pytorch/pytorch/blob/master/benchmarks/fastrnns/custom_lstms.py#L187), converted correctly to Relay, and it gives results identical to Torch's! This was not possible with the generic tensor array. @kevinthesun @wweic

```python
import torch
import torch.jit as jit
from torch import Tensor
from torch.nn import Parameter
from typing import Tuple


class LSTMCell(jit.ScriptModule):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
        self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
        self.bias_ih = Parameter(torch.randn(4 * hidden_size))
        self.bias_hh = Parameter(torch.randn(4 * hidden_size))

    @jit.script_method
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        hx, cx = state
        gates = (torch.mm(input, self.weight_ih.t()) + self.bias_ih +
                 torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cellgate = torch.tanh(cellgate)
        outgate = torch.sigmoid(outgate)

        cy = (forgetgate * cx) + (ingate * cellgate)
        hy = outgate * torch.tanh(cy)

        return hy, (hy, cy)


class LSTMLayer(jit.ScriptModule):
    def __init__(self, cell, *cell_args):
        super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        outputs = []
        for i in range(input.size(0)):
            out, state = self.cell(input[i], state)
            outputs += [out]
        return torch.stack(outputs), state
```

Here is the converted Relay IR:

```
fn (%input: Tensor[(5, 2, 3), float32], %v25: Tensor[(16, 3), float32], %v28: Tensor[(16), float32], %v30: Tensor[(16, 4), float32], %v34: Tensor[(16), float32], %states: (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) -> (static_tensor_float32_?_2_4_t[], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) {
  %0 = Nil /* ty=List[static_tensor_float32_2_4_t[]] */;
  %36 = (
    let %while_loop: fn (int32, List[static_tensor_float32_2_4_t[]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) -> (int32, List[static_tensor_float32_2_4_t[]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) = fn (%i.1: int32, %outputs.6: List[static_tensor_float32_2_4_t[]], %state.6: (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) -> (int32, List[static_tensor_float32_2_4_t[]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) {
      %1 = less(%i.1, 5 /* ty=int32 */) /* ty=bool */;
      if (%1) {
        %2 = add(%i.1, 1 /* ty=int32 */) /* ty=int32 */;
        %3 = take(%input, %i.1, axis=0) /* ty=Tensor[(2, 3), float32] */;
        %4 = transpose(%v25, axes=[1, 0]) /* ty=Tensor[(3, 16), float32] */;
        %5 = transpose(%4, axes=[1, 0]) /* ty=Tensor[(16, 3), float32] */;
        %6 = nn.dense(%3, %5, units=None) /* ty=Tensor[(2, 16), float32] */;
        %7 = add(%6, %v28) /* ty=Tensor[(2, 16), float32] */;
        %8 = %state.6.0;
        %9 = transpose(%v30, axes=[1, 0]) /* ty=Tensor[(4, 16), float32] */;
        %10 = transpose(%9, axes=[1, 0]) /* ty=Tensor[(16, 4), float32] */;
        %11 = nn.dense(%8, %10, units=None) /* ty=Tensor[(2, 16), float32] */;
        %12 = add(%7, %11) /* ty=Tensor[(2, 16), float32] */;
        %13 = add(%12, %v34) /* ty=Tensor[(2, 16), float32] */;
        %14 = strided_slice(%13, begin=[0, 12], end=[2, 16], strides=[1, 1]) /* ty=Tensor[(2, 4), float32] */;
        %15 = sigmoid(%14) /* ty=Tensor[(2, 4), float32] */;
        %16 = strided_slice(%13, begin=[0, 4], end=[2, 8], strides=[1, 1]) /* ty=Tensor[(2, 4), float32] */;
        %17 = sigmoid(%16) /* ty=Tensor[(2, 4), float32] */;
        %18 = %state.6.1;
        %19 = multiply(%17, %18) /* ty=Tensor[(2, 4), float32] */;
        %20 = strided_slice(%13, begin=[0, 0], end=[2, 4], strides=[1, 1]) /* ty=Tensor[(2, 4), float32] */;
        %21 = sigmoid(%20) /* ty=Tensor[(2, 4), float32] */;
        %22 = strided_slice(%13, begin=[0, 8], end=[2, 12], strides=[1, 1]) /* ty=Tensor[(2, 4), float32] */;
        %23 = tanh(%22) /* ty=Tensor[(2, 4), float32] */;
        %24 = multiply(%21, %23) /* ty=Tensor[(2, 4), float32] */;
        %25 = add(%19, %24) /* ty=Tensor[(2, 4), float32] */;
        %26 = tanh(%25) /* ty=Tensor[(2, 4), float32] */;
        %27 = multiply(%15, %26) /* ty=Tensor[(2, 4), float32] */;
        %28 = (%27, %25);
        %29 = (%27, %28);
        %30 = %29.0;
        %31 = tensor_constructor_float32_2_4(%30) /* ty=static_tensor_float32_2_4_t[] */;
        %32 = Nil /* ty=List[static_tensor_float32_2_4_t[]] */;
        %33 = Cons(%31, %32) /* ty=List[static_tensor_float32_2_4_t[]] */;
        %34 = @concat(%outputs.6, %33) /* ty=List[static_tensor_float32_2_4_t[]] */;
        %35 = %29.1;
        %while_loop(%2, %34, %35) /* ty=(int32, List[static_tensor_float32_2_4_t[]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) */
      } else {
        (%i.1, %outputs.6, %state.6)
      }
    };
    %while_loop
  );
  %37 = %36(0 /* ty=int32 */, %0, %states) /* ty=(int32, List[static_tensor_float32_2_4_t[]], (Tensor[(2, 4), float32], Tensor[(2, 4), float32])) */;
  %38 = %37.1;
  %39 = @tensor_array_stack_float32_2_4(%38) /* ty=static_tensor_float32_?_2_4_t[] */;
  %40 = %37.2;
  (%39, %40)
}
```
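For reference, here is a minimal sketch of how a conversion like the one above can be driven. The input names and the nested tuple shape for the state input are assumptions about how the frontend's shape list is laid out, not something spelled out in this comment; the shapes match the Relay signature above (seq_len=5, batch=2, input_size=3, hidden_size=4).

```python
import torch
import tvm
from tvm import relay

seq_len, batch, input_size, hidden_size = 5, 2, 3, 4

# jit.ScriptModule subclasses are compiled to TorchScript on construction,
# so no extra torch.jit.script() call is needed here.
model = LSTMLayer(LSTMCell, input_size, hidden_size).eval()

inp = torch.randn(seq_len, batch, input_size)
states = (torch.randn(batch, hidden_size), torch.randn(batch, hidden_size))

with torch.no_grad():
    pt_out, pt_states = model(inp, states)

# The static shapes given here are what let the frontend pick the
# static tensor array (static_tensor_float32_2_4_t) over the generic one.
# Input names are illustrative; the nested tuple mirrors the (hx, cx) state.
shape_list = [("input", (seq_len, batch, input_size)),
              ("states", ((batch, hidden_size), (batch, hidden_size)))]
mod, params = relay.frontend.from_pytorch(model, shape_list)
print(mod["main"])
```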
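Because the resulting IR uses Relay ADTs (List and the static tensor array), it has to be executed with the Relay VM rather than the graph runtime. A sketch under that assumption, continuing from the snippet above; VM construction details vary slightly across TVM versions:

```python
import numpy as np
from tvm.runtime import vm as vm_rt

with tvm.transform.PassContext(opt_level=3):
    vm_exec = relay.vm.compile(mod, target="llvm", params=params)

vm = vm_rt.VirtualMachine(vm_exec, tvm.cpu(0))

# The tuple argument is converted to a runtime tuple for the %states input.
res = vm.run(inp.numpy(), (states[0].numpy(), states[1].numpy()))

# res mirrors the Relay return type: (stacked outputs, (hy, cy)).
assert np.allclose(res[0].asnumpy(), pt_out.numpy(), atol=1e-5)
```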
