This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
new e5219467dd0 [Bug](join) avoid overflow on bucket_size+1 (#37807)
e5219467dd0 is described below
commit e5219467dd0f812701b81dafa110ec61bc4917d2
Author: Pxl <[email protected]>
AuthorDate: Mon Jul 15 18:47:36 2024 +0800
[Bug](join) avoid overflow on bucket_size+1 (#37807)
## Proposed changes
pick from #37493
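
A minimal standalone sketch of the overflow (an illustration, not code from this patch), assuming the usual phmap behavior that NormalizeCapacity yields capacities of the form 2^n - 1: clamping to uint32_t::max() can leave bucket_size at 2^32 - 1, so a later 32-bit bucket_size + 1 wraps to 0, while the new clamp of int32_t::max() + 1 (2^31) keeps bucket_size + 1 representable. The new cap is also itself a power of two, the same shape NormalizeCapacity(expect_bucket_size) + 1 produces when it is not clamped.

    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
        // Old clamp: bucket_size can saturate at 2^32 - 1.
        uint32_t old_cap = std::numeric_limits<uint32_t>::max();
        // New clamp: 2^31, computed in size_t so the + 1 cannot overflow.
        uint32_t new_cap = static_cast<uint32_t>(
                static_cast<size_t>(std::numeric_limits<int32_t>::max()) + 1);

        std::cout << old_cap + 1 << '\n'; // unsigned wrap-around: prints 0
        std::cout << new_cap + 1 << '\n'; // prints 2147483649, no wrap
        return 0;
    }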
---
be/src/vec/common/hash_table/join_hash_table.h | 2 +-
.../query_p2/big_join_build/big_join_build.out | 4 ++
.../query_p2/big_join_build/big_join_build.groovy | 60 ++++++++++++++++++++++
3 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/be/src/vec/common/hash_table/join_hash_table.h b/be/src/vec/common/hash_table/join_hash_table.h
index 99ce2d13b48..317987541cd 100644
--- a/be/src/vec/common/hash_table/join_hash_table.h
+++ b/be/src/vec/common/hash_table/join_hash_table.h
@@ -38,7 +38,7 @@ public:
static uint32_t calc_bucket_size(size_t num_elem) {
size_t expect_bucket_size = num_elem + (num_elem - 1) / 7;
return std::min(phmap::priv::NormalizeCapacity(expect_bucket_size) + 1,
-                        static_cast<size_t>(std::numeric_limits<uint32_t>::max()));
+                        static_cast<size_t>(std::numeric_limits<int32_t>::max()) + 1);
}
size_t get_byte_size() const {
diff --git a/regression-test/data/query_p2/big_join_build/big_join_build.out b/regression-test/data/query_p2/big_join_build/big_join_build.out
new file mode 100644
index 00000000000..4e55f55863d
--- /dev/null
+++ b/regression-test/data/query_p2/big_join_build/big_join_build.out
@@ -0,0 +1,4 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+97656250
+
diff --git a/regression-test/suites/query_p2/big_join_build/big_join_build.groovy b/regression-test/suites/query_p2/big_join_build/big_join_build.groovy
new file mode 100644
index 00000000000..a1a1e906e67
--- /dev/null
+++ b/regression-test/suites/query_p2/big_join_build/big_join_build.groovy
@@ -0,0 +1,60 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("big_join_build") {
+
+ sql """ DROP TABLE IF EXISTS b_table; """
+ sql """ DROP TABLE IF EXISTS p_table; """
+
+ sql """
+ create table b_table (
+ k1 tinyint not null,
+ )
+ duplicate key (k1)
+ distributed BY hash(k1) buckets 64
+ properties("replication_num" = "1");
+ """
+ sql """
+ create table p_table (
+ k1 tinyint not null,
+ )
+ duplicate key (k1)
+ distributed BY hash(k1) buckets 64
+ properties("replication_num" = "1");
+ """
+ sql """
+ insert into p_table select * from numbers("number" = "5");
+ """
+ sql """
+ insert into b_table select * from numbers("number" = "1000000000");
+ """
+ sql """
+ insert into b_table select * from numbers("number" = "1000000000");
+ """
+ sql """
+ insert into b_table select * from numbers("number" = "1000000000");
+ """
+ sql """
+ insert into b_table select * from numbers("number" = "1000000000");
+ """
+ sql """
+ insert into b_table select * from numbers("number" = "1000000000");
+ """
+
+ qt_sql """select /*+ leading(p_table b_table) */ count(*) from p_table,b_table where p_table.k1=b_table.k1 and b_table.k1<91;"""
+}
+
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]