yikf commented on code in PR #8127:
URL: https://github.com/apache/incubator-gluten/pull/8127#discussion_r1895364323
##########
backends-velox/src/main/scala/org/apache/spark/sql/execution/unsafe/UnsafeColumnarBuildSideRelation.scala:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.unsafe
+
+import org.apache.gluten.backendsapi.BackendsApiManager
+import org.apache.gluten.columnarbatch.ColumnarBatches
+import org.apache.gluten.iterator.Iterators
+import org.apache.gluten.memory.arrow.alloc.ArrowBufferAllocators
+import org.apache.gluten.runtime.Runtimes
+import org.apache.gluten.sql.shims.SparkShimLoader
+import org.apache.gluten.utils.ArrowAbiUtil
+import org.apache.gluten.vectorized.{ColumnarBatchSerializerJniWrapper, NativeColumnarToRowJniWrapper}
+
+import org.apache.spark.{SparkEnv, TaskContext}
+import org.apache.spark.internal.Logging
+import org.apache.spark.internal.config.MEMORY_OFFHEAP_ENABLED
+import org.apache.spark.memory.{TaskMemoryManager, UnifiedMemoryManager}
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, UnsafeProjection, UnsafeRow}
+import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, IdentityBroadcastMode}
+import org.apache.spark.sql.execution.joins.{BuildSideRelation, HashedRelationBroadcastMode}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.utils.SparkArrowUtil
+import org.apache.spark.sql.vectorized.ColumnarBatch
+import org.apache.spark.task.TaskResources
+import org.apache.spark.util.Utils
+
+import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
+import com.esotericsoftware.kryo.io.{Input, Output}
+import org.apache.arrow.c.ArrowSchema
+
+import java.io.{Externalizable, ObjectInput, ObjectOutput}
+
+import scala.collection.JavaConverters.asScalaIteratorConverter
+
+/**
+ * UnsafeColumnarBuildSideRelation should be backed by off-heap memory to avoid on-heap OOM. It is
+ * almost the same as ColumnarBuildSideRelation; ColumnarBuildSideRelation should be removed once
+ * UnsafeColumnarBuildSideRelation matures.
+ *
+ * @param output

Review Comment:
   nit, need comments.
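A sketch of the kind of parameter documentation the nit asks for. The constructor is assumed to mirror `ColumnarBuildSideRelation(child.output, serialized.map(_.getSerialized), mode)` as built in `VeloxSparkPlanExecApi` below; the `batches` and `mode` parameter names are hypothetical, since the full signature is truncated in this excerpt:

```scala
/**
 * A broadcast build-side relation whose serialized batches are held off-heap
 * to avoid on-heap OOM.
 *
 * @param output  output attributes of the broadcast build-side plan
 * @param batches serialized columnar batches of the build side (hypothetical name)
 * @param mode    the BroadcastMode the relation was built with, e.g.
 *                HashedRelationBroadcastMode or IdentityBroadcastMode (hypothetical name)
 */
```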
##########
backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCHSuite.scala:
##########
@@ -305,6 +305,18 @@ class VeloxTPCHV1BhjSuite extends VeloxTPCHSuite {
   }
 }
 
+/** BroadcastBuildSideRelation uses onheap memory. */
+class VeloxTPCHV1BhjOnheapSuite extends VeloxTPCHSuite {
+  override def subType(): String = "v1-bhj-offheap"

Review Comment:
   v1-bhj-onheap?

##########
backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxSparkPlanExecApi.scala:
##########
@@ -633,7 +637,13 @@ class VeloxSparkPlanExecApi extends SparkPlanExecApi {
       }
       numOutputRows += serialized.map(_.getNumRows).sum
       dataSize += rawSize
-      ColumnarBuildSideRelation(child.output, serialized.map(_.getSerialized), mode)
+      if (useOffheapBroadcastBuildRelation) {
+        TaskResources.runUnsafe {

Review Comment:
   Do we still need `TaskResources.runUnsafe`?
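For context on the question above, a sketch of how the branch presumably reads; the body inside `runUnsafe` is truncated in this diff, so it is a guess, assuming `UnsafeColumnarBuildSideRelation` takes the same arguments as `ColumnarBuildSideRelation`:

```scala
if (useOffheapBroadcastBuildRelation) {
  // TaskResources.runUnsafe appears to open a temporary task-resource scope so
  // that native/off-heap allocations made on the driver (outside a running
  // Spark task) are tracked and released. The review question is whether
  // constructing the unsafe relation still allocates anything that needs it.
  TaskResources.runUnsafe {
    UnsafeColumnarBuildSideRelation(child.output, serialized.map(_.getSerialized), mode)
  }
} else {
  ColumnarBuildSideRelation(child.output, serialized.map(_.getSerialized), mode)
}
```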
##########
backends-velox/src/test/scala/org/apache/gluten/execution/VeloxHashJoinSuite.scala:
##########
@@ -114,71 +115,84 @@ class VeloxHashJoinSuite extends VeloxWholeStageTransformerSuite {
   }
 
   test("Reuse broadcast exchange for different build keys with same table") {
-    withTable("t1", "t2") {
-      spark.sql("""
-                  |CREATE TABLE t1 USING PARQUET
-                  |AS SELECT id as c1, id as c2 FROM range(10)
-                  |""".stripMargin)
-
-      spark.sql("""
-                  |CREATE TABLE t2 USING PARQUET
-                  |AS SELECT id as c1, id as c2 FROM range(3)
-                  |""".stripMargin)
-
-      val df = spark.sql("""
-                           |SELECT * FROM t1
-                           |JOIN t2 as tmp1 ON t1.c1 = tmp1.c1 and tmp1.c1 = tmp1.c2
-                           |JOIN t2 as tmp2 on t1.c2 = tmp2.c2 and tmp2.c1 = tmp2.c2
-                           |""".stripMargin)
-
-      assert(collect(df.queryExecution.executedPlan) {
-        case b: BroadcastExchangeExec => b
-      }.size == 2)
-
-      checkAnswer(
-        df,
-        Row(2, 2, 2, 2, 2, 2) :: Row(1, 1, 1, 1, 1, 1) :: Row(0, 0, 0, 0, 0, 0) :: Nil)
-
-      assert(collect(df.queryExecution.executedPlan) {
-        case b: ColumnarBroadcastExchangeExec => b
-      }.size == 1)
-      assert(collect(df.queryExecution.executedPlan) {
-        case r @ ReusedExchangeExec(_, _: ColumnarBroadcastExchangeExec) => r
-      }.size == 1)
+    for (enabledOffheapBroadcast <- Seq("true", "false")) {
+      withSQLConf(
+        GlutenConfig.VELOX_BROADCAST_BUILD_RELATION_USE_OFFHEAP.key -> enabledOffheapBroadcast) {
+        withTable("t1", "t2") {
+          spark.sql("""
+                      |CREATE TABLE t1 USING PARQUET
+                      |AS SELECT id as c1, id as c2 FROM range(10)
+                      |""".stripMargin)
+
+          spark.sql("""
+                      |CREATE TABLE t2 USING PARQUET
+                      |AS SELECT id as c1, id as c2 FROM range(3)
+                      |""".stripMargin)
+
+          val df = spark.sql("""
+                               |SELECT * FROM t1
+                               |JOIN t2 as tmp1 ON t1.c1 = tmp1.c1 and tmp1.c1 = tmp1.c2
+                               |JOIN t2 as tmp2 on t1.c2 = tmp2.c2 and tmp2.c1 = tmp2.c2
+                               |""".stripMargin)
+
+          assert(collect(df.queryExecution.executedPlan) {
+            case b: BroadcastExchangeExec => b
+          }.size == 2)
+
+          checkAnswer(
+            df,
+            Row(2, 2, 2, 2, 2, 2) :: Row(1, 1, 1, 1, 1, 1) :: Row(0, 0, 0, 0, 0, 0) :: Nil)
+
+          assert(collect(df.queryExecution.executedPlan) {
+            case b: ColumnarBroadcastExchangeExec => b
+          }.size == 1)
+          assert(collect(df.queryExecution.executedPlan) {
+            case r @ ReusedExchangeExec(_, _: ColumnarBroadcastExchangeExec) => r
+          }.size == 1)
+        }
+      }
     }
   }
 
   test("ColumnarBuildSideRelation transform support multiple key columns") {
-    withTable("t1", "t2") {
-      val df1 =
-        (0 until 50).map(i => (i % 2, i % 3, s"${i % 25}")).toDF("t1_c1", "t1_c2", "date").as("df1")
-      val df2 = (0 until 50)
-        .map(i => (i % 11, i % 13, s"${i % 10}"))
-        .toDF("t2_c1", "t2_c2", "date")
-        .as("df2")
-      df1.write.partitionBy("date").saveAsTable("t1")
-      df2.write.partitionBy("date").saveAsTable("t2")
-
-      val df = sql("""
-                     |SELECT t1.date, t1.t1_c1, t2.t2_c2
-                     |FROM t1
-                     |JOIN t2 ON t1.date = t2.date
-                     |WHERE t1.date=if(3 <= t2.t2_c2, if(3 < t2.t2_c1, 3, t2.t2_c1), t2.t2_c2)
-                     |ORDER BY t1.date DESC, t1.t1_c1 DESC, t2.t2_c2 DESC
-                     |LIMIT 1
-                     |""".stripMargin)
-
-      checkAnswer(df, Row("3", 1, 4) :: Nil)
-      // collect the DPP plan.
-      val subqueryBroadcastExecs = collectWithSubqueries(df.queryExecution.executedPlan) {
-        case subqueryBroadcast: ColumnarSubqueryBroadcastExec => subqueryBroadcast
+    for (enabledOffheapBroadcast <- Seq("true", "false")) {

Review Comment:
   ```scala
   Seq(true, false).foreach { enabledOffheapBroadcast =>
     xxx
   }
   ```

##########
shims/common/src/main/scala/org/apache/gluten/GlutenConfig.scala:
##########
@@ -2250,4 +2253,12 @@
     .doc("If enabled, gluten will convert the viewfs path to hdfs path in scala side")
     .booleanConf
     .createWithDefault(false)
+
+  val VELOX_BROADCAST_BUILD_RELATION_USE_OFFHEAP =
+    buildConf("spark.gluten.velox.offHeapBroadcastBuildRelation.enabled")
+      .internal()
+      .doc("If enabled, broadcast build relation will use offheap memory. " +
+        "Otherwise, broadcast build relation will use onheap memory.")
+      .booleanConf
+      .createWithDefault(true)

Review Comment:
   Let's default it to false, and need a smooth transition period?
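Two sketches for the suggestions above. First, the `Seq(true, false).foreach` rewrite applied to the loops in `VeloxHashJoinSuite`; the test body is elided, and `.toString` is needed because `withSQLConf` expects string values:

```scala
Seq(true, false).foreach { enabledOffheapBroadcast =>
  withSQLConf(
    GlutenConfig.VELOX_BROADCAST_BUILD_RELATION_USE_OFFHEAP.key ->
      enabledOffheapBroadcast.toString) {
    // ... existing test body unchanged ...
  }
}
```

Second, the config definition with the default flipped to `false` as suggested; this is the same builder chain as in the diff, with only the default changed:

```scala
val VELOX_BROADCAST_BUILD_RELATION_USE_OFFHEAP =
  buildConf("spark.gluten.velox.offHeapBroadcastBuildRelation.enabled")
    .internal()
    .doc("If enabled, broadcast build relation will use offheap memory. " +
      "Otherwise, broadcast build relation will use onheap memory.")
    .booleanConf
    // opt-in during the transition period, until the off-heap path matures
    .createWithDefault(false)
```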
