codope commented on code in PR #8298:
URL: https://github.com/apache/hudi/pull/8298#discussion_r1148810977


##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala:
##########
@@ -281,14 +281,15 @@ class SparkHoodieTableFileIndex(spark: SparkSession,
     // Static partition-path prefix is defined as a prefix of the full partition-path where only
     // first N partition columns (in-order) have proper (static) values bound in equality predicates,
     // allowing in turn to build such prefix to be used in subsequent filtering
-    val staticPartitionColumnNameValuePairs: Seq[(String, Any)] = {
+    val staticPartitionColumnNameValuePairs: Seq[(String, (String, Any))] = {
       // Extract from simple predicates of the form `date = '2022-01-01'` both
       // partition column and corresponding (literal) value
-      val staticPartitionColumnValuesMap = extractEqualityPredicatesLiteralValues(partitionColumnPredicates)
+      val zoneId = configProperties.getString(DateTimeUtils.TIMEZONE_OPTION, SQLConf.get.sessionLocalTimeZone)

Review Comment:
   Why not use the `HoodieTableConfig#TIMELINE_TIMEZONE`?
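   A rough sketch of what that could look like (illustrative only, not the PR's code; `metaClient` and the accessor used are assumptions about what is reachable from `SparkHoodieTableFileIndex`, and `TIMELINE_TIMEZONE` only distinguishes LOCAL vs UTC rather than carrying a full zone id):
   ```scala
   // Hypothetical sketch: resolve the zone from the table's timeline timezone
   // instead of relying only on the Spark session conf.
   val timelineTz = metaClient.getTableConfig.getString(HoodieTableConfig.TIMELINE_TIMEZONE)
   // TIMELINE_TIMEZONE is LOCAL or UTC; fall back to the session zone for LOCAL.
   val zoneId = if ("UTC".equalsIgnoreCase(timelineTz)) "UTC" else SQLConf.get.sessionLocalTimeZone
   ```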



##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/TestLazyPartitionPathFetching.scala:
##########
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.hudi
+
+class TestLazyPartitionPathFetching extends HoodieSparkSqlTestBase {
+
+  test("Test querying with string column + partition pruning") {
+    withTempDir { tmp =>
+      val tableName = generateTableName
+      spark.sql(
+        s"""
+           |create table $tableName (
+           |  id int,
+           |  name string,
+           |  price double,
+           |  ts long,
+           |  grass_date string
+           |) using hudi
+           | location '${tmp.getCanonicalPath}'
+           | tblproperties (
+           |  primaryKey ='id',
+           |  type = 'cow',
+           |  preCombineField = 'ts'
+           | )
+           | PARTITIONED BY (grass_date)
+       """.stripMargin)
+      spark.sql(s"insert into $tableName values(1, 'a1', 10, 1000, 
date('2023-02-27'))")
+      spark.sql(s"insert into $tableName values(2, 'a2', 10, 1000, 
date('2023-02-28'))")
+      spark.sql(s"insert into $tableName values(3, 'a3', 10, 1000, 
date('2023-03-01'))")
+
+      checkAnswer(s"select id, name, price, ts from $tableName where 
grass_date = '2023-03-01' order by id")(
+        Seq(3, "a3", 10.0, 1000)
+      )
+    }
+  }
+
+  test("Test querying with date column + partition pruning") {
+    withTempDir { tmp =>
+      val tableName = generateTableName
+      spark.sql(
+        s"""
+           |create table $tableName (
+           |  id int,
+           |  name string,
+           |  price double,
+           |  ts long,
+           |  grass_date date
+           |) using hudi
+           | location '${tmp.getCanonicalPath}'
+           | tblproperties (
+           |  primaryKey ='id',
+           |  type = 'cow',
+           |  preCombineField = 'ts'
+           | )
+           | PARTITIONED BY (grass_date)
+       """.stripMargin)
+      spark.sql(s"insert into $tableName values(1, 'a1', 10, 1000, 
date('2023-02-27'))")
+      spark.sql(s"insert into $tableName values(2, 'a2', 10, 1000, 
date('2023-02-28'))")
+      spark.sql(s"insert into $tableName values(3, 'a3', 10, 1000, 
date('2023-03-01'))")
+
+      checkAnswer(s"select id, name, price, ts from $tableName where 
grass_date = date'2023-03-01' order by id")(
+        Seq(3, "a3", 10.0, 1000)
+      )
+    }
+  }
+
+  test("Test querying with date column + partition pruning (multi-level 
partitioning)") {
+    withTempDir { tmp =>
+      val tableName = generateTableName
+      spark.sql(
+        s"""
+           |create table $tableName (
+           |  id int,
+           |  name string,
+           |  price double,
+           |  ts long,
+           |  grass_region string,
+           |  grass_date date
+           |) using hudi
+           | location '${tmp.getCanonicalPath}'
+           | tblproperties (
+           |  primaryKey ='id',
+           |  type = 'cow',
+           |  preCombineField = 'ts'
+           | )
+           | PARTITIONED BY (grass_region, grass_date)
+         """.stripMargin)
+      spark.sql(s"set schema.on.read.enable=true")
+      spark.sql(s"insert into $tableName values(1, 'a1', 10, 1000, 'ID', 
date('2023-02-27'))")
+      spark.sql(s"insert into $tableName values(2, 'a2', 10, 1000, 'ID', 
date('2023-02-28'))")
+      spark.sql(s"insert into $tableName values(3, 'a3', 10, 1000, 'ID', 
date('2023-03-01'))")
+
+      // test (A) that is expected to fail
+      // must execute this first to prevent caching
+      // steps into prunePartition, causing partitionPath to be incorrectly constructed
+      checkAnswer(s"select id, name, price, ts from $tableName " +
+        s"where grass_date = date'2023-03-01' and grass_region='ID' order by id")(
+        Seq(3, "a3", 10.0, 1000)
+      )
+
+      // test (B) that is expected to pass
+      // if we execute test (B) first, cached listings will be used and test (A) will pass
+      // with no error
+      checkAnswer(s"select id, name, price, ts from $tableName where 
grass_date = date'2023-03-01' order by id")(
+        Seq(3, "a3", 10.0, 1000)
+      )
+
+      // TLDR:
+      // execution order of [B] = pass
+      // execution order of [B, A] = pass
+      // execution order of [A] = fail
+      // execution order of [A, B] = fail

Review Comment:
   Remove this summary comment and instead add inline comments where each part, or each combination of execution orders, is being tested.
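   For instance, something along these lines (illustrative only, not the PR's code):
   ```scala
   // test (A): must run first, before any listing is cached; this is the case that
   // steps into prunePartition and constructs the partition path incorrectly
   // (execution orders [A] and [A, B] hit the failure).
   checkAnswer(s"select id, name, price, ts from $tableName " +
     s"where grass_date = date'2023-03-01' and grass_region='ID' order by id")(
     Seq(3, "a3", 10.0, 1000)
   )

   // test (B): prunes on grass_date only; passes on its own, and when it runs first
   // its cached listings also let (A) pass (orders [B] and [B, A] succeed).
   checkAnswer(s"select id, name, price, ts from $tableName where grass_date = date'2023-03-01' order by id")(
     Seq(3, "a3", 10.0, 1000)
   )
   ```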


