cloud-fan commented on code in PR #48867:
URL: https://github.com/apache/spark/pull/48867#discussion_r1846806346


##########
sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala:
##########
@@ -198,6 +202,38 @@ class V1WriteFallbackSuite extends QueryTest with SharedSparkSession with Before
       SparkSession.setDefaultSession(spark)
     }
   }
+
+  test("SPARK-50315: metrics for V1 fallback writers") {
+    val session = SparkSession.builder()
+      .master("local[1]")
+      .config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName)
+      .getOrCreate()
+
+    def captureWrite(thunk: => Unit): SparkPlan = {
+      val physicalPlans = withPhysicalPlansCaptured(spark, thunk)
+      val v1FallbackWritePlans = physicalPlans.filter {
+        case _: AppendDataExecV1 | _: OverwriteByExpressionExecV1 => true
+        case _ => false
+      }
+
+      assert(v1FallbackWritePlans.size === 1)
+      v1FallbackWritePlans.head
+    }
+
+    val appendPlan = captureWrite {
+      val df = session.createDataFrame(Seq((1, "x")))
+      df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test")
+    }
+    assert(appendPlan.metrics("numOutputRows").value === 1)
+
+    session.catalog.cacheTable("test")

Review Comment:
   why do we need to cache it?
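
   For context on the pattern the new test relies on (capturing the executed
   physical plan so a SQL metric such as `numOutputRows` can be asserted on),
   here is a minimal sketch built only on Spark's public
   `QueryExecutionListener` API. The suite's `withPhysicalPlansCaptured`
   helper may be implemented differently; the names `PlanCaptureSketch` and
   `runAndCapture` below are illustrative, not from the PR.

   ```scala
   import scala.collection.mutable.ArrayBuffer

   import org.apache.spark.sql.SparkSession
   import org.apache.spark.sql.execution.{QueryExecution, SparkPlan}
   import org.apache.spark.sql.util.QueryExecutionListener

   object PlanCaptureSketch {
     // Runs `thunk` and returns the physical plans of all queries that
     // completed successfully while it ran.
     def runAndCapture(spark: SparkSession)(thunk: => Unit): Seq[SparkPlan] = {
       val plans = ArrayBuffer.empty[SparkPlan]
       val listener = new QueryExecutionListener {
         override def onSuccess(funcName: String, qe: QueryExecution,
             durationNs: Long): Unit = {
           plans += qe.executedPlan
         }
         override def onFailure(funcName: String, qe: QueryExecution,
             exception: Exception): Unit = ()
       }
       spark.listenerManager.register(listener)
       try {
         thunk
         // Listener events are delivered asynchronously, so wait for the bus
         // to drain before reading the buffer. Note listenerBus is
         // private[spark]; this line only compiles from code under the
         // org.apache.spark package, as this test suite is.
         spark.sparkContext.listenerBus.waitUntilEmpty()
       } finally {
         spark.listenerManager.unregister(listener)
       }
       plans.toSeq
     }
   }
   ```

   A caller would then filter the captured plans for the V1 fallback write
   nodes (`AppendDataExecV1`, `OverwriteByExpressionExecV1`) and read the
   metric off the matching node, e.g.
   `plan.metrics("numOutputRows").value`, as the test above does.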


