HappenLee commented on a change in pull request #7972:
URL: https://github.com/apache/incubator-doris/pull/7972#discussion_r805489166
##########
File path: be/src/vec/exec/vset_operation_node.cpp
##########
@@ -237,9 +242,22 @@ Status VSetOperationNode::hash_table_build(RuntimeState*
state) {
_mem_used += allocated_bytes;
RETURN_IF_LIMIT_EXCEEDED(state, "Set Operation Node, while getting
next from the child 0.");
- RETURN_IF_ERROR(process_build_block(block));
- RETURN_IF_LIMIT_EXCEEDED(state, "Set Operation Node, while
constructing the hash table.");
+ if (block.rows() != 0) {
+ int i = 0;
+ for (const auto &data : block) {
+ const auto & column = *data.column.get();
+ columns[i].get()->insert_range_from(column, 0, block.rows());
Review comment:
It may cause a string column to allocate too much memory at one time; you
should provide a way to handle this case.
##########
File path: be/src/vec/exec/join/vhash_join_node.cpp
##########
@@ -711,45 +766,62 @@ Status HashJoinNode::get_next(RuntimeState* state, Block*
output_block, bool* eo
Status st;
if (_probe_index < _probe_block.rows()) {
- MutableBlock mutable_block = (output_block->mem_reuse() &&
!_have_other_join_conjunct) ?
- MutableBlock(output_block) :
- MutableBlock(VectorizedUtils::create_empty_columnswithtypename(
- !_have_other_join_conjunct ? row_desc() :
_row_desc_for_other_join_conjunt));
-
std::visit(
- [&](auto&& arg) {
+ [&](auto&& arg, auto have_other_join_conjunct,
+ auto is_left_semi_join, auto is_left_anti_join, auto
is_inner_join) {
Review comment:
You need to represent this logic more abstractly. This way it will incur
the overhead of only one type judgment per block, and will not affect
performance.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]