samskalicky commented on a change in pull request #19064:
URL: https://github.com/apache/incubator-mxnet/pull/19064#discussion_r555454444
##########
File path: src/operator/tensor/ordering_op-inl.h
##########
@@ -208,88 +209,88 @@ using namespace mshadow;
struct fill_ind_to_one {
- template<typename DType>
- MSHADOW_XINLINE static void Map(int i, const index_t* indices, DType* out) {
+ template<typename DType, typename IDXType>
+ MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, DType*
out) {
out[indices[i]] = static_cast<DType>(1);
}
};
struct fill_ind {
- template<typename DType>
- MSHADOW_XINLINE static void Map(int i, const index_t* indices, const DType*
val,
+ template<typename DType, typename IDXType>
+ MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, const
DType* val,
int req, DType* out) {
KERNEL_ASSIGN(out[indices[i]], req, val[i]);
}
};
-template<typename DType>
+template<typename DType, typename IDXType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
- const Tensor<cpu, 1, index_t>& ind,
+ const Tensor<cpu, 1, IDXType>& ind,
const Tensor<cpu, 1, char>& work,
- index_t K, index_t N, bool is_ascend,
+ IDXType K, IDXType N, bool is_ascend,
Stream<cpu> *s) {
// Use full sort when K is relatively large.
const bool full_sort(K*8 > N);
// Batch size.
- const index_t M(work.size(0)/(sizeof(DType)*N));
+ const size_t M(work.size(0)/(sizeof(DType)*N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
- for (index_t i = 0; i < M; ++i) {
+ for (index_t i = 0; i < static_cast<index_t>(M); ++i) {
Review comment:
Why does this need a static_cast? Shouldn't you just declare the variable
above with the right type instead?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]