amogh-jahagirdar commented on code in PR #6838:
URL: https://github.com/apache/iceberg/pull/6838#discussion_r1107848388


##########
spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/TestSparkWriteConfDistributionMode.java:
##########
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_HASH;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_NONE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE;
+
+import java.util.Map;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestSparkWriteConfDistributionMode extends 
SparkTestBaseWithCatalog {
+
+  @After
+  public void cleanUp() {
+    spark.conf().unset(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE);
+    sql("DROP TABLE IF EXISTS %s", tableName);
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionDefault() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, 
ImmutableMap.of());
+    Assert.assertEquals(DistributionMode.NONE, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithWriteOption() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    Map<String, String> writeOptions =
+        ImmutableMap.of(SparkWriteOptions.DISTRIBUTION_MODE, 
DistributionMode.HASH.modeName());
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, writeOptions);
+
+    Assert.assertEquals(DistributionMode.HASH, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithSessionConfig() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    spark
+        .conf()
+        .set(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE, 
DistributionMode.HASH.modeName());
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, 
ImmutableMap.of());
+    Assert.assertEquals(DistributionMode.HASH, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithTableProperties() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    table.updateProperties().set(WRITE_DISTRIBUTION_MODE, 
WRITE_DISTRIBUTION_MODE_HASH).commit();
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, 
ImmutableMap.of());
+    Assert.assertEquals(DistributionMode.HASH, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithTblPropAndSessionConfig() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    table.updateProperties().set(WRITE_DISTRIBUTION_MODE, 
WRITE_DISTRIBUTION_MODE_RANGE).commit();
+    spark
+        .conf()
+        .set(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE, 
DistributionMode.HASH.modeName());
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, 
ImmutableMap.of());
+    // session config overwrite the table properties
+    Assert.assertEquals(DistributionMode.HASH, writeConf.distributionMode());
+  }
+
+  @Test
+  public void 
testSparkWriteConfDistributionModeWithWriteOptionAndSessionConfig() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    Map<String, String> writeOptions =
+        ImmutableMap.of(SparkWriteOptions.DISTRIBUTION_MODE, 
DistributionMode.RANGE.modeName());
+    spark
+        .conf()
+        .set(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE, 
DistributionMode.HASH.modeName());
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, writeOptions);
+    // write options overwrite the session config
+    Assert.assertEquals(DistributionMode.RANGE, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithEverything() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    Map<String, String> writeOptions =
+        ImmutableMap.of(SparkWriteOptions.DISTRIBUTION_MODE, 
DistributionMode.RANGE.modeName());
+    spark
+        .conf()
+        .set(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE, 
DistributionMode.HASH.modeName());

Review Comment:
   Nit: could we have a newline after line 162, before the 
table.updateProperties() call? It makes the test easier to read, IMO.



##########
spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/TestSparkWriteConfDistributionMode.java:
##########
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_HASH;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_NONE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE;
+
+import java.util.Map;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestSparkWriteConfDistributionMode extends 
SparkTestBaseWithCatalog {

Review Comment:
   Is there any reason we need a separate test class, rather than just adding 
these tests to `TestSparkDistributionAndOrderingUtil`?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java:
##########
@@ -47,4 +47,8 @@ private SparkSQLProperties() {}
   public static final String PRESERVE_DATA_GROUPING =
       "spark.sql.iceberg.planning.preserve-data-grouping";
   public static final boolean PRESERVE_DATA_GROUPING_DEFAULT = false;
+
+  // Influence write distribution mode using spark SQL config

Review Comment:
   Nit: could the comment just be `Control write distribution mode`? That 
follows the pattern of the existing configs, and it's already clear from the 
class itself that this is a Spark SQL property.



##########
spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/TestSparkWriteConfDistributionMode.java:
##########
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_HASH;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_NONE;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE;
+
+import java.util.Map;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestSparkWriteConfDistributionMode extends 
SparkTestBaseWithCatalog {
+
+  @After
+  public void cleanUp() {
+    spark.conf().unset(SparkSQLProperties.SESSION_WRITE_DISTRIBUTION_MODE);
+    sql("DROP TABLE IF EXISTS %s", tableName);
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionDefault() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);
+
+    Table table = validationCatalog.loadTable(tableIdent);
+
+    SparkWriteConf writeConf = new SparkWriteConf(spark, table, 
ImmutableMap.of());
+    Assert.assertEquals(DistributionMode.NONE, writeConf.distributionMode());
+  }
+
+  @Test
+  public void testSparkWriteConfDistributionModeWithWriteOption() {
+    sql(
+        "CREATE TABLE %s (id BIGINT, data STRING, date DATE, ts TIMESTAMP) "
+            + "USING iceberg "
+            + "PARTITIONED BY (date, days(ts))",
+        tableName);

Review Comment:
   I think this table-creation step can be moved into an `@Before` method, 
since it is repeated at the start of every test.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to