HappenLee commented on a change in pull request #7972:
URL: https://github.com/apache/incubator-doris/pull/7972#discussion_r804373145



##########
File path: be/src/vec/exec/join/vhash_join_node.cpp
##########
@@ -168,46 +177,98 @@ struct ProcessHashTableProbe {
     // the output block struct is same with mutable block. we can do more opt 
on it and simplify
     // the logic of probe
     // TODO: opt the visited here to reduce the size of hash table
+    template<bool is_left_semi_join, bool is_left_anti_join, bool 
is_inner_join>
     Status do_process(HashTableContext& hash_table_ctx, ConstNullMapPtr 
null_map,
                       MutableBlock& mutable_block, Block* output_block) {
         using KeyGetter = typename HashTableContext::State;
         using Mapped = typename HashTableContext::Mapped;
 
         KeyGetter key_getter(_probe_raw_ptrs, _join_node->_probe_key_sz, 
nullptr);
-
+    
         std::vector<uint32_t> items_counts(_probe_rows);
         auto& mcol = mutable_block.mutable_columns();
-
-        int right_col_idx = _join_node->_is_right_semi_anti ? 0 : 
_left_table_data_types.size();
-        int right_col_len = _right_table_data_types.size();
         int current_offset = 0;
 
         for (; _probe_index < _probe_rows;) {
-            // ignore null rows
             if constexpr (ignore_null) {
                 if ((*null_map)[_probe_index]) {
                     items_counts[_probe_index++] = 0;
                     continue;
                 }
             }
-
             int repeat_count = 0;
-            auto find_result =
-                    (*null_map)[_probe_index]
+            if constexpr (is_inner_join) {
+                if (!(*null_map)[_probe_index]) {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+
+                    if (find_result.is_found()) {
+                        auto& mapped = find_result.get_mapped();
+
+                        // TODO: Iterators are currently considered to be a 
heavy operation and have a certain impact on performance.
+                        // We should rethink whether to use this iterator mode 
in the future. Now just opt the one row case
+                        if (mapped.get_row_count() == 1) {
+                            mapped.visited = true;
+                            // right semi/anti join should dispose the data in 
hash table
+                            // after probe data eof
+                            ++repeat_count;
+                            for (size_t j = 0; j < _right_col_len; ++j) {
+                                auto& column = 
*mapped.block->get_by_position(j).column;
+                                mcol[j + _right_col_idx]->insert_from(column, 
mapped.row_num);
+                            }
+                        } else {
+                            if (_probe_index + 2 < _probe_rows)
+                                key_getter.prefetch(hash_table_ctx.hash_table, 
_probe_index + 2, _arena);
+                            for (auto it = mapped.begin(); it.ok(); ++it) {
+                                // right semi/anti join should dispose the 
data in hash table
+                                // after probe data eof
+                                ++repeat_count;
+                                for (size_t j = 0; j < _right_col_len; ++j) {
+                                    auto& column = 
*it->block->get_by_position(j).column;
+                                    // TODO: interface insert from cause 
serious performance problems
+                                    //  when column is nullable. Try to make 
more effective way
+                                    mcol[j + 
_right_col_idx]->insert_from(column, it->row_num);
+                                }
+                                it->visited = true;
+                            }
+                        }
+                    }
+                }
+            } else if constexpr (is_left_anti_join) {
+                if ((*null_map)[_probe_index]) { 
+                    ++repeat_count;
+                } else {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+                    if (find_result.is_found()) { 
+                        //do nothing
+                    } else {
+                        ++repeat_count;
+                    }
+                }
+            } else if constexpr(is_left_semi_join) {
+                int repeat_count = 0;

Review comment:
      `repeat_count` is already pre-defined at line 199; this redundant declaration shadows it and should be removed.

##########
File path: be/src/vec/exec/join/vhash_join_node.cpp
##########
@@ -168,46 +177,98 @@ struct ProcessHashTableProbe {
     // the output block struct is same with mutable block. we can do more opt 
on it and simplify
     // the logic of probe
     // TODO: opt the visited here to reduce the size of hash table
+    template<bool is_left_semi_join, bool is_left_anti_join, bool 
is_inner_join>
     Status do_process(HashTableContext& hash_table_ctx, ConstNullMapPtr 
null_map,
                       MutableBlock& mutable_block, Block* output_block) {
         using KeyGetter = typename HashTableContext::State;
         using Mapped = typename HashTableContext::Mapped;
 
         KeyGetter key_getter(_probe_raw_ptrs, _join_node->_probe_key_sz, 
nullptr);
-
+    
         std::vector<uint32_t> items_counts(_probe_rows);
         auto& mcol = mutable_block.mutable_columns();
-
-        int right_col_idx = _join_node->_is_right_semi_anti ? 0 : 
_left_table_data_types.size();
-        int right_col_len = _right_table_data_types.size();
         int current_offset = 0;
 
         for (; _probe_index < _probe_rows;) {
-            // ignore null rows
             if constexpr (ignore_null) {
                 if ((*null_map)[_probe_index]) {
                     items_counts[_probe_index++] = 0;
                     continue;
                 }
             }
-
             int repeat_count = 0;
-            auto find_result =
-                    (*null_map)[_probe_index]
+            if constexpr (is_inner_join) {
+                if (!(*null_map)[_probe_index]) {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);

Review comment:
      Why not prefetch here as well?

##########
File path: be/src/vec/exec/join/vhash_join_node.cpp
##########
@@ -168,46 +177,98 @@ struct ProcessHashTableProbe {
     // the output block struct is same with mutable block. we can do more opt 
on it and simplify
     // the logic of probe
     // TODO: opt the visited here to reduce the size of hash table
+    template<bool is_left_semi_join, bool is_left_anti_join, bool 
is_inner_join>
     Status do_process(HashTableContext& hash_table_ctx, ConstNullMapPtr 
null_map,
                       MutableBlock& mutable_block, Block* output_block) {
         using KeyGetter = typename HashTableContext::State;
         using Mapped = typename HashTableContext::Mapped;
 
         KeyGetter key_getter(_probe_raw_ptrs, _join_node->_probe_key_sz, 
nullptr);
-
+    
         std::vector<uint32_t> items_counts(_probe_rows);
         auto& mcol = mutable_block.mutable_columns();
-
-        int right_col_idx = _join_node->_is_right_semi_anti ? 0 : 
_left_table_data_types.size();
-        int right_col_len = _right_table_data_types.size();
         int current_offset = 0;
 
         for (; _probe_index < _probe_rows;) {
-            // ignore null rows
             if constexpr (ignore_null) {
                 if ((*null_map)[_probe_index]) {
                     items_counts[_probe_index++] = 0;
                     continue;
                 }
             }
-
             int repeat_count = 0;
-            auto find_result =
-                    (*null_map)[_probe_index]
+            if constexpr (is_inner_join) {
+                if (!(*null_map)[_probe_index]) {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+
+                    if (find_result.is_found()) {
+                        auto& mapped = find_result.get_mapped();
+
+                        // TODO: Iterators are currently considered to be a 
heavy operation and have a certain impact on performance.
+                        // We should rethink whether to use this iterator mode 
in the future. Now just opt the one row case
+                        if (mapped.get_row_count() == 1) {
+                            mapped.visited = true;
+                            // right semi/anti join should dispose the data in 
hash table
+                            // after probe data eof
+                            ++repeat_count;
+                            for (size_t j = 0; j < _right_col_len; ++j) {
+                                auto& column = 
*mapped.block->get_by_position(j).column;
+                                mcol[j + _right_col_idx]->insert_from(column, 
mapped.row_num);
+                            }
+                        } else {
+                            if (_probe_index + 2 < _probe_rows)
+                                key_getter.prefetch(hash_table_ctx.hash_table, 
_probe_index + 2, _arena);
+                            for (auto it = mapped.begin(); it.ok(); ++it) {
+                                // right semi/anti join should dispose the 
data in hash table
+                                // after probe data eof
+                                ++repeat_count;
+                                for (size_t j = 0; j < _right_col_len; ++j) {
+                                    auto& column = 
*it->block->get_by_position(j).column;
+                                    // TODO: interface insert from cause 
serious performance problems
+                                    //  when column is nullable. Try to make 
more effective way
+                                    mcol[j + 
_right_col_idx]->insert_from(column, it->row_num);
+                                }
+                                it->visited = true;
+                            }
+                        }
+                    }
+                }
+            } else if constexpr (is_left_anti_join) {
+                if ((*null_map)[_probe_index]) { 
+                    ++repeat_count;
+                } else {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+                    if (find_result.is_found()) { 
+                        //do nothing
+                    } else {
+                        ++repeat_count;
+                    }
+                }
+            } else if constexpr(is_left_semi_join) {

Review comment:
      Why not add `constexpr` branches for more join types here, such as right semi/anti join?

##########
File path: be/src/vec/core/block.h
##########
@@ -348,6 +348,13 @@ class MutableBlock {
         _data_types.clear();
     }
 
+    void append_from_block(const Block* block, const int col_len, const int 
start_col_idx, const size_t row_number) {

Review comment:
      This function is not called anywhere; it should not be added here.

##########
File path: be/src/vec/exec/join/vhash_join_node.cpp
##########
@@ -168,46 +177,98 @@ struct ProcessHashTableProbe {
     // the output block struct is same with mutable block. we can do more opt 
on it and simplify
     // the logic of probe
     // TODO: opt the visited here to reduce the size of hash table
+    template<bool is_left_semi_join, bool is_left_anti_join, bool 
is_inner_join>
     Status do_process(HashTableContext& hash_table_ctx, ConstNullMapPtr 
null_map,
                       MutableBlock& mutable_block, Block* output_block) {
         using KeyGetter = typename HashTableContext::State;
         using Mapped = typename HashTableContext::Mapped;
 
         KeyGetter key_getter(_probe_raw_ptrs, _join_node->_probe_key_sz, 
nullptr);
-
+    
         std::vector<uint32_t> items_counts(_probe_rows);
         auto& mcol = mutable_block.mutable_columns();
-
-        int right_col_idx = _join_node->_is_right_semi_anti ? 0 : 
_left_table_data_types.size();
-        int right_col_len = _right_table_data_types.size();
         int current_offset = 0;
 
         for (; _probe_index < _probe_rows;) {
-            // ignore null rows
             if constexpr (ignore_null) {
                 if ((*null_map)[_probe_index]) {
                     items_counts[_probe_index++] = 0;
                     continue;
                 }
             }
-
             int repeat_count = 0;
-            auto find_result =
-                    (*null_map)[_probe_index]
+            if constexpr (is_inner_join) {
+                if (!(*null_map)[_probe_index]) {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+
+                    if (find_result.is_found()) {
+                        auto& mapped = find_result.get_mapped();
+
+                        // TODO: Iterators are currently considered to be a 
heavy operation and have a certain impact on performance.
+                        // We should rethink whether to use this iterator mode 
in the future. Now just opt the one row case
+                        if (mapped.get_row_count() == 1) {
+                            mapped.visited = true;
+                            // right semi/anti join should dispose the data in 
hash table
+                            // after probe data eof
+                            ++repeat_count;
+                            for (size_t j = 0; j < _right_col_len; ++j) {
+                                auto& column = 
*mapped.block->get_by_position(j).column;
+                                mcol[j + _right_col_idx]->insert_from(column, 
mapped.row_num);
+                            }
+                        } else {
+                            if (_probe_index + 2 < _probe_rows)
+                                key_getter.prefetch(hash_table_ctx.hash_table, 
_probe_index + 2, _arena);
+                            for (auto it = mapped.begin(); it.ok(); ++it) {
+                                // right semi/anti join should dispose the 
data in hash table
+                                // after probe data eof
+                                ++repeat_count;
+                                for (size_t j = 0; j < _right_col_len; ++j) {
+                                    auto& column = 
*it->block->get_by_position(j).column;
+                                    // TODO: interface insert from cause 
serious performance problems
+                                    //  when column is nullable. Try to make 
more effective way
+                                    mcol[j + 
_right_col_idx]->insert_from(column, it->row_num);
+                                }
+                                it->visited = true;
+                            }
+                        }
+                    }
+                }
+            } else if constexpr (is_left_anti_join) {
+                if ((*null_map)[_probe_index]) { 
+                    ++repeat_count;
+                } else {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+                    if (find_result.is_found()) { 
+                        //do nothing
+                    } else {
+                        ++repeat_count;
+                    }
+                }
+            } else if constexpr(is_left_semi_join) {
+                int repeat_count = 0;
+                if (!(*null_map)[_probe_index]) {
+                    auto find_result = 
key_getter.find_key(hash_table_ctx.hash_table, _probe_index, _arena);
+                    if (find_result.is_found()) { 
+                        ++repeat_count;
+                    }
+                }
+                items_counts[_probe_index++] = repeat_count;

Review comment:
      This code already exists at line 311; remove the duplicate.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to