xiarixiaoyao commented on a change in pull request #4013:
URL: https://github.com/apache/hudi/pull/4013#discussion_r753757069
##########
File path:
hudi-common/src/main/java/org/apache/hudi/common/model/HoodieColumnRangeMetadata.java
##########
@@ -30,16 +28,21 @@
private final String columnName;
private final T minValue;
private final T maxValue;
- private final long numNulls;
- private final PrimitiveStringifier stringifier;
+ private long numNulls;
+ // For Decimal Type/Date Type, minValue/maxValue cannot represent its
original value.
+ // eg: when parquet collects column information, the decimal type is
collected as int/binary type.
+ // so we cannot use minValue and maxValue directly, use
minValueAsString/maxValueAsString instead.
+ private final String minValueAsString;
+ private final String maxValueAsString;
- public HoodieColumnRangeMetadata(final String filePath, final String
columnName, final T minValue, final T maxValue, final long numNulls, final
PrimitiveStringifier stringifier) {
+ public HoodieColumnRangeMetadata(final String filePath, final String
columnName, final T minValue, final T maxValue, long numNulls, final String
minValueAsString, final String maxValueAsString) {
this.filePath = filePath;
this.columnName = columnName;
this.minValue = minValue;
this.maxValue = maxValue;
- this.numNulls = numNulls;
- this.stringifier = stringifier;
+ this.numNulls = numNulls == -1 ? 0 : numNulls;
Review comment:
yes, we have already thrown an exception at L264 in
ZCurveOptimizeHelper.getMinMaxValue
##########
File path:
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestOptimizeTable.scala
##########
@@ -88,19 +88,28 @@ class TestOptimizeTable extends HoodieClientTestBase {
.save(basePath)
assertEquals(1000, spark.read.format("hudi").load(basePath).count())
+ // use unsorted col as filter.
assertEquals(1000,
- spark.read.option(DataSourceReadOptions.ENABLE_DATA_SKIPPING.key(),
"true").format("hudi").load(basePath).count())
+ spark.read.option(DataSourceReadOptions.ENABLE_DATA_SKIPPING.key(),
"true")
+ .format("hudi").load(basePath).where("end_lat >= 0").count())
+ // use sorted col as filter.
+ assertEquals(1000,
+ spark.read.option(DataSourceReadOptions.ENABLE_DATA_SKIPPING.key(),
"true")
+ .format("hudi").load(basePath).where("begin_lon >= 0").count())
}
@Test
def testCollectMinMaxStatistics(): Unit = {
val testPath = new Path(System.getProperty("java.io.tmpdir"), "minMax")
val statisticPath = new Path(System.getProperty("java.io.tmpdir"), "stat")
val fs = testPath.getFileSystem(spark.sparkContext.hadoopConfiguration)
+ val complexDataFrame = createComplexDataFrame(spark)
+
complexDataFrame.repartition(3).write.mode("overwrite").save(testPath.toString)
+ val df = spark.read.load(testPath.toString)
try {
- val complexDataFrame = createComplexDataFrame(spark)
-
complexDataFrame.repartition(3).write.mode("overwrite").save(testPath.toString)
- val df = spark.read.load(testPath.toString)
+ // test z-order sort for all primitive types, should not throw an error.
Review comment:
fixed.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]