liujiayi771 commented on code in PR #11391:
URL: 
https://github.com/apache/incubator-gluten/pull/11391#discussion_r2735270824


##########
backends-velox/src/test/scala/org/apache/spark/sql/execution/WriteUtils.scala:
##########
@@ -16,20 +16,51 @@
  */
 package org.apache.spark.sql.execution
 
+import org.apache.gluten.execution.VeloxColumnarToCarrierRowExec
+
 import org.apache.spark.sql.{DataFrame, GlutenQueryTest}
 import org.apache.spark.sql.catalyst.expressions.{BitwiseAnd, Expression, 
HiveHash, Literal, Pmod, UnsafeProjection}
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.test.SQLTestUtils
+import org.apache.spark.sql.util.QueryExecutionListener
 
 import java.io.File
 
-trait BucketWriteUtils extends GlutenQueryTest with SQLTestUtils {
+trait WriteUtils extends GlutenQueryTest with SQLTestUtils {
 
   def tableDir(table: String): File = {
     val identifier = spark.sessionState.sqlParser.parseTableIdentifier(table)
     new File(spark.sessionState.catalog.defaultTablePath(identifier))
   }
 
+  def checkNativeWrite(sqlStr: String, expectNative: Boolean = true): Unit = {
+    var nativeUsed = false
+    val queryListener = new QueryExecutionListener {
+      override def onFailure(f: String, qe: QueryExecution, e: Exception): 
Unit = {}
+      override def onSuccess(funcName: String, qe: QueryExecution, duration: 
Long): Unit = {
+        if (!nativeUsed) {
+          nativeUsed = if (isSparkVersionGE("3.4")) {
+            
qe.executedPlan.find(_.isInstanceOf[ColumnarWriteFilesExec]).isDefined

Review Comment:
   Nit: prefer `qe.executedPlan.exists(_.isInstanceOf[ColumnarWriteFilesExec])` — `exists(p)` is the idiomatic equivalent of `find(p).isDefined`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to