nsivabalan commented on a change in pull request #2168:
URL: https://github.com/apache/hudi/pull/2168#discussion_r540483487



##########
File path: 
hudi-integ-test/src/main/java/org/apache/hudi/integ/testsuite/dag/nodes/ValidateDatasetNode.java
##########
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.integ.testsuite.dag.nodes;
+
+import org.apache.hudi.DataSourceWriteOptions;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.integ.testsuite.configuration.DeltaConfig.Config;
+import org.apache.hudi.integ.testsuite.dag.ExecutionContext;
+import org.apache.hudi.integ.testsuite.schema.SchemaUtils;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.spark.api.java.function.MapFunction;
+import org.apache.spark.api.java.function.ReduceFunction;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.catalyst.analysis.SimpleAnalyzer$;
+import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
+import org.apache.spark.sql.catalyst.encoders.RowEncoder;
+import org.apache.spark.sql.catalyst.expressions.Attribute;
+import org.apache.spark.sql.types.StructType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import scala.Tuple2;
+import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
+
+/**
+ * This node validates that contents from the input path are intact in Hudi. It uses the Spark datasource for comparison purposes. By default, no configs are required for this node, but there is an
+ * optional config "delete_input_data" that you can set for this node. If set, contents from inputPath are deleted once validation completes. This comes in handy for long running test suites.
+ * README has more details under docker set up for usages of this node.
+ */
+public class ValidateDatasetNode extends DagNode<Boolean> {
+
+  private static Logger log = LoggerFactory.getLogger(ValidateDatasetNode.class);
+
+  public ValidateDatasetNode(Config config) {
+    this.config = config;
+  }
+
+  @Override
+  public void execute(ExecutionContext context) throws Exception {
+
+    SparkSession session = SparkSession.builder().sparkContext(context.getJsc().sc()).getOrCreate();
+
+    // todo: Fix partitioning schemes. For now, assumes data based partitioning.
+    String inputPath = context.getHoodieTestSuiteWriter().getCfg().inputBasePath + "/*/*";
+    String hudiPath = context.getHoodieTestSuiteWriter().getCfg().targetBasePath + "/*/*/*";
+    log.warn("ValidateDataset Node: Input path " + inputPath + ", hudi path " + hudiPath);
+    // listing batches to be validated
+    String inputPathStr = context.getHoodieTestSuiteWriter().getCfg().inputBasePath;
+    FileSystem fs = new Path(inputPathStr)
+        .getFileSystem(context.getHoodieTestSuiteWriter().getConfiguration());
+    FileStatus[] fileStatuses = fs.listStatus(new Path(inputPathStr));
+    for (FileStatus fileStatus : fileStatuses) {
+      log.debug("Listing all Micro batches to be validated :: " + fileStatus.getPath().toString());
+    }
+
+    String recordKeyField = context.getWriterContext().getProps().getString(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY());

Review comment:
       Nope, just the record key and partition path field names. We need to group by HoodieKey in the df:
   
   ```
   inputDf.groupByKey(
           (MapFunction<Row, String>) value ->
                   value.getAs(partitionPathField) + "+" + value.getAs(recordKeyField), Encoders.STRING())
   ```
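   
   For context, here is a minimal sketch of how that grouping could feed a comparison between the input batches and the Hudi table. It reuses `session`, `inputPath`, `hudiPath` and `recordKeyField` from the diff above; the datasource format names, the `partitionPathField` lookup (mirroring the `recordKeyField` line), and the `except`-based missing-key check are illustrative assumptions, not necessarily what this PR ends up doing:
   
   ```
   Dataset<Row> inputDf = session.read().format("avro").load(inputPath);
   Dataset<Row> hudiDf = session.read().format("org.apache.hudi").load(hudiPath);

   // Assumed lookup, mirroring how recordKeyField is fetched above.
   String partitionPathField = context.getWriterContext().getProps()
       .getString(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY());

   // Key every row by "partitionPathValue+recordKeyValue", i.e. the HoodieKey parts.
   MapFunction<Row, String> keyFn = row ->
       row.getAs(partitionPathField) + "+" + row.getAs(recordKeyField);

   Dataset<String> inputKeys = inputDf.groupByKey(keyFn, Encoders.STRING()).keys();
   Dataset<String> hudiKeys = hudiDf.groupByKey(keyFn, Encoders.STRING()).keys();

   // Any input key missing on the Hudi side would mean records were lost.
   long missing = inputKeys.except(hudiKeys).count();
   if (missing != 0) {
     throw new AssertionError(missing + " record keys from input are missing in Hudi");
   }
   ```
   
   The point of keying on the concatenated partition path and record key values (rather than the field names) is that the groups then line up with HoodieKey semantics, so both duplicates and missing records surface per key.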




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

