access2rohit commented on a change in pull request #19340:
URL: https://github.com/apache/incubator-mxnet/pull/19340#discussion_r504272945
##########
File path: src/operator/numpy/np_insert_op_tensor-inl.h
##########
@@ -175,34 +175,34 @@ void NumpyInsertTensorCompute(const nnvm::NodeAttrs&
attrs,
} else {
// broadcast check
for (int i = outshape.ndim() - 1; i >= 0; --i) {
- int sz = outshape[i];
+ size_t sz = outshape[i];
if (i == axis) {
sz = numnew;
}
CHECK((values.shape_[i] == 1) || (values.shape_[i] == sz));
}
size_t temp_storage_bytes, temp_mem_size;
- temp_storage_bytes = SortByKeyWorkspaceSize<int64_t, int,
xpu>(indices_len, false, true);
+ temp_storage_bytes = SortByKeyWorkspaceSize<int64_t, index_t,
xpu>(indices_len, false, true);
temp_mem_size = indices_len * sizeof(int64_t) * 2 +
- indices_len * sizeof(int) +
- outshape[axis] * sizeof(int) * 2 +
+ indices_len * sizeof(index_t) +
+ outshape[axis] * sizeof(index_t) * 2 +
temp_storage_bytes;
Tensor<xpu, 1, char> temp_mem =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(temp_mem_size), s);
int64_t* indices_ptr = reinterpret_cast<int64_t*>(temp_mem.dptr_);
int64_t* sorted_indices_ptr = reinterpret_cast<int64_t*>(indices_ptr +
indices_len);
- int* order_ptr = reinterpret_cast<int*>(sorted_indices_ptr + indices_len);
- int* is_insert = reinterpret_cast<int*>(order_ptr + indices_len);
- int* origin_idx = reinterpret_cast<int*>(is_insert + outshape[axis]);
+ index_t* order_ptr = reinterpret_cast<index_t*>(sorted_indices_ptr +
indices_len);
+ index_t* is_insert = reinterpret_cast<index_t*>(order_ptr + indices_len);
+ index_t* origin_idx = reinterpret_cast<index_t*>(is_insert +
outshape[axis]);
Review comment:
This is how you allocate temporary workspace (with different data types).
`order_ptr` was `int*` before; I have now changed it to `index_t*` and updated
the memory allocated for `order_ptr` to `index_t` as well. The stride is for
the pointer index, not for the data.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]