nsivabalan commented on code in PR #8520:
URL: https://github.com/apache/hudi/pull/8520#discussion_r1173171771
##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java:
##########
@@ -46,9 +53,33 @@ public List<String> getTransformersNames() {
@Override
public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession,
Dataset<Row> rowDataset, TypedProperties properties) {
Dataset<Row> dataset = rowDataset;
+ boolean isErrorTableEnabled =
properties.getBoolean(ERROR_TABLE_ENABLED.key(),
ERROR_TABLE_ENABLED.defaultValue());
+ if (isErrorTableEnabled && !isErrorRecordPresent(dataset)) {
+ dataset = dataset.withColumn(ERROR_TABLE_CURRUPT_RECORD_COL_NAME,
lit(null));
+ }
for (Transformer t : transformers) {
dataset = t.apply(jsc, sparkSession, dataset, properties);
+ // validate in every stage to ensure it's not dropped by one of the
transformer and added by next transformer.
+ validate(dataset, isErrorTableEnabled);
}
return dataset;
}
+
+ /**
+ * validates for constraints on ErrorRecordColumn when ErrorTable
enabled/disabled configs are set.
+ * @param dataset
+ * @param isErrorTableEnabled
+ */
+ private void validate(Dataset<Row> dataset, boolean isErrorTableEnabled) {
+ boolean isErrorRecordColumnPresent = isErrorRecordPresent(dataset);
+ if (isErrorTableEnabled != isErrorRecordColumnPresent) {
Review Comment:
but what's the next step the user can take? We can't even restart the pipeline,
right? So the only other option is to disable quarantine.
##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java:
##########
@@ -46,9 +53,33 @@ public List<String> getTransformersNames() {
@Override
public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession,
Dataset<Row> rowDataset, TypedProperties properties) {
Dataset<Row> dataset = rowDataset;
+ boolean isErrorTableEnabled =
properties.getBoolean(ERROR_TABLE_ENABLED.key(),
ERROR_TABLE_ENABLED.defaultValue());
+ if (isErrorTableEnabled && !isErrorRecordPresent(dataset)) {
+ dataset = dataset.withColumn(ERROR_TABLE_CURRUPT_RECORD_COL_NAME,
lit(null));
+ }
for (Transformer t : transformers) {
dataset = t.apply(jsc, sparkSession, dataset, properties);
+ // validate in every stage to ensure it's not dropped by one of the
transformer and added by next transformer.
+ validate(dataset, isErrorTableEnabled);
}
return dataset;
}
+
+ /**
+ * validates for constraints on ErrorRecordColumn when ErrorTable
enabled/disabled configs are set.
+ * @param dataset
+ * @param isErrorTableEnabled
+ */
+ private void validate(Dataset<Row> dataset, boolean isErrorTableEnabled) {
+ boolean isErrorRecordColumnPresent = isErrorRecordPresent(dataset);
+ if (isErrorTableEnabled != isErrorRecordColumnPresent) {
Review Comment:
Yes — before adding the error record column for the first time, we might need to
do this validation.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]