This is an automated email from the ASF dual-hosted git repository.

jackye pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new a31f941a38 Core: Minor refactoring of PartitionsTable (#6975)
a31f941a38 is described below

commit a31f941a387c06878f9186704bb1cb0d2759c93b
Author: Ajantha Bhat <[email protected]>
AuthorDate: Tue Mar 21 08:19:44 2023 +0530

    Core: Minor refactoring of PartitionsTable (#6975)
---
 .../java/org/apache/iceberg/PartitionsTable.java   | 24 ++++++++++++----------
 .../org/apache/iceberg/TestMetadataTableScans.java |  3 ++-
 .../spark/source/TestIcebergSourceTablesBase.java  |  4 ++--
 .../spark/source/TestIcebergSourceTablesBase.java  |  4 ++--
 .../spark/source/TestIcebergSourceTablesBase.java  |  4 ++--
 .../spark/source/TestIcebergSourceTablesBase.java  |  4 ++--
 6 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/core/src/main/java/org/apache/iceberg/PartitionsTable.java b/core/src/main/java/org/apache/iceberg/PartitionsTable.java
index 4e2288a332..5a289d756c 100644
--- a/core/src/main/java/org/apache/iceberg/PartitionsTable.java
+++ b/core/src/main/java/org/apache/iceberg/PartitionsTable.java
@@ -45,9 +45,11 @@ public class PartitionsTable extends BaseMetadataTable {
     this.schema =
         new Schema(
             Types.NestedField.required(1, "partition", Partitioning.partitionType(table)),
-            Types.NestedField.required(2, "record_count", Types.LongType.get()),
-            Types.NestedField.required(3, "file_count", Types.IntegerType.get()),
-            Types.NestedField.required(4, "spec_id", Types.IntegerType.get()));
+            Types.NestedField.required(4, "spec_id", Types.IntegerType.get()),
+            Types.NestedField.required(
+                2, "record_count", Types.LongType.get(), "Count of records in data files"),
+            Types.NestedField.required(
+                3, "file_count", Types.IntegerType.get(), "Count of data files"));
   }
 
   @Override
@@ -77,7 +79,7 @@ public class PartitionsTable extends BaseMetadataTable {
           schema(),
           scan.schema(),
           partitions,
-          root -> StaticDataTask.Row.of(root.recordCount, root.fileCount));
+          root -> StaticDataTask.Row.of(root.dataRecordCount, root.dataFileCount));
     } else {
       return StaticDataTask.of(
           io().newInputFile(table().operations().current().metadataFileLocation()),
@@ -90,7 +92,7 @@ public class PartitionsTable extends BaseMetadataTable {
 
   private static StaticDataTask.Row convertPartition(Partition partition) {
     return StaticDataTask.Row.of(
-        partition.key, partition.recordCount, partition.fileCount, partition.specId);
+        partition.key, partition.specId, partition.dataRecordCount, partition.dataFileCount);
   }
 
   private static Iterable<Partition> partitions(Table table, StaticTableScan scan) {
@@ -220,20 +222,20 @@ public class PartitionsTable extends BaseMetadataTable {
 
   static class Partition {
     private final StructLike key;
-    private long recordCount;
-    private int fileCount;
     private int specId;
+    private long dataRecordCount;
+    private int dataFileCount;
 
     Partition(StructLike key) {
       this.key = key;
-      this.recordCount = 0;
-      this.fileCount = 0;
       this.specId = 0;
+      this.dataRecordCount = 0;
+      this.dataFileCount = 0;
     }
 
     void update(DataFile file) {
-      this.recordCount += file.recordCount();
-      this.fileCount += 1;
+      this.dataRecordCount += file.recordCount();
+      this.dataFileCount += 1;
       this.specId = file.specId();
     }
   }
diff --git a/core/src/test/java/org/apache/iceberg/TestMetadataTableScans.java b/core/src/test/java/org/apache/iceberg/TestMetadataTableScans.java
index 1ba141f7b6..c45da54f03 100644
--- a/core/src/test/java/org/apache/iceberg/TestMetadataTableScans.java
+++ b/core/src/test/java/org/apache/iceberg/TestMetadataTableScans.java
@@ -332,7 +332,8 @@ public class TestMetadataTableScans extends MetadataTableScanTestBase {
 
     Table partitionsTable = new PartitionsTable(table);
     Types.StructType expected =
-        new Schema(required(3, "file_count", Types.IntegerType.get())).asStruct();
+        new Schema(required(3, "file_count", Types.IntegerType.get(), "Count of data files"))
+            .asStruct();
 
     TableScan scanWithProjection = partitionsTable.newScan().select("file_count");
     Assert.assertEquals(expected, scanWithProjection.schema().asStruct());
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
index 07d77dc99b..bf7e6e0960 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
@@ -1234,8 +1234,8 @@ public abstract class TestIcebergSourceTablesBase extends SparkTestBase {
 
     Types.StructType expectedSchema =
         Types.StructType.of(
-            required(2, "record_count", Types.LongType.get()),
-            required(3, "file_count", Types.IntegerType.get()));
+            required(2, "record_count", Types.LongType.get(), "Count of records in data files"),
+            required(3, "file_count", Types.IntegerType.get(), "Count of data files"));
 
     Table partitionsTable = loadTable(tableIdentifier, "partitions");
 
diff --git a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
index c51470f19c..634b53c3d9 100644
--- a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
+++ b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
@@ -1232,8 +1232,8 @@ public abstract class TestIcebergSourceTablesBase extends SparkTestBase {
 
     Types.StructType expectedSchema =
         Types.StructType.of(
-            required(2, "record_count", Types.LongType.get()),
-            required(3, "file_count", Types.IntegerType.get()));
+            required(2, "record_count", Types.LongType.get(), "Count of records in data files"),
+            required(3, "file_count", Types.IntegerType.get(), "Count of data files"));
 
     Table partitionsTable = loadTable(tableIdentifier, "partitions");
 
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
index 3abebb83ac..f3ab50b755 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
@@ -1251,8 +1251,8 @@ public abstract class TestIcebergSourceTablesBase extends SparkTestBase {
 
     Types.StructType expectedSchema =
         Types.StructType.of(
-            required(2, "record_count", Types.LongType.get()),
-            required(3, "file_count", Types.IntegerType.get()));
+            required(2, "record_count", Types.LongType.get(), "Count of records in data files"),
+            required(3, "file_count", Types.IntegerType.get(), "Count of data files"));
 
     Table partitionsTable = loadTable(tableIdentifier, "partitions");
 
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
index 178c52b840..123c06b7e5 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIcebergSourceTablesBase.java
@@ -1253,8 +1253,8 @@ public abstract class TestIcebergSourceTablesBase extends SparkTestBase {
 
     Types.StructType expectedSchema =
         Types.StructType.of(
-            required(2, "record_count", Types.LongType.get()),
-            required(3, "file_count", Types.IntegerType.get()));
+            required(2, "record_count", Types.LongType.get(), "Count of records in data files"),
+            required(3, "file_count", Types.IntegerType.get(), "Count of data files"));
 
     Table partitionsTable = loadTable(tableIdentifier, "partitions");
 

Reply via email to