This is an automated email from the ASF dual-hosted git repository.

pnowojski pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 1d45dd09045c2eb0dc406ce8216c9ab96111b581
Author: Piotr Nowojski <[email protected]>
AuthorDate: Thu Jan 11 09:37:10 2024 +0100

    [hotfix][test] Fix some Architecture Violation errors
---
 .../flink/api/java/hadoop/mapreduce/wrapper/HadoopInputSplit.java      | 2 ++
 .../apache/flink/api/java/typeutils/runtime/WritableComparator.java    | 2 ++
 .../apache/flink/api/java/typeutils/runtime/WritableSerializer.java    | 1 +
 .../functions/sink/filesystem/HadoopPathBasedBulkFormatBuilder.java    | 2 ++
 .../api/functions/sink/filesystem/SerializableConfiguration.java       | 3 +++
 .../flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java | 2 --
 6 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/wrapper/HadoopInputSplit.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/wrapper/HadoopInputSplit.java
index a2e64d92ed0..dcdf3d2cd6f 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/wrapper/HadoopInputSplit.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/wrapper/HadoopInputSplit.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.api.java.hadoop.mapreduce.wrapper;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.core.io.InputSplit;
 import org.apache.flink.core.io.LocatableInputSplit;
 
@@ -33,6 +34,7 @@ import java.io.ObjectOutputStream;
  * A wrapper that represents an input split from the Hadoop mapreduce API as a Flink {@link
  * InputSplit}.
  */
+@Internal
 public class HadoopInputSplit extends LocatableInputSplit {
 
     private static final long serialVersionUID = 6119153593707857235L;
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
index f7f4ffafb07..df3226dbddd 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableComparator.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.api.java.typeutils.runtime;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.typeutils.TypeComparator;
 import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
@@ -36,6 +37,7 @@ import java.io.IOException;
  *
  * @param <T>
  */
+@Internal
 public class WritableComparator<T extends Writable & Comparable<T>> extends TypeComparator<T> {
 
     private static final long serialVersionUID = 1L;
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
index bff42892bf4..0a42357e434 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
@@ -163,6 +163,7 @@ public final class WritableSerializer<T extends Writable> extends TypeSerializer
     }
 
     /** {@link WritableSerializer} snapshot class. */
+    @Internal
     public static final class WritableSerializerSnapshot<T extends Writable>
             extends GenericTypeSerializerSnapshot<T, WritableSerializer> {
 
diff --git a/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/HadoopPathBasedBulkFormatBuilder.java b/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/HadoopPathBasedBulkFormatBuilder.java
index b3199e3dc0b..1c29cf156de 100644
--- a/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/HadoopPathBasedBulkFormatBuilder.java
+++ b/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/HadoopPathBasedBulkFormatBuilder.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.streaming.api.functions.sink.filesystem;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.core.fs.Path;
 import org.apache.flink.formats.hadoop.bulk.DefaultHadoopFileCommitterFactory;
 import org.apache.flink.formats.hadoop.bulk.HadoopFileCommitterFactory;
@@ -32,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import java.io.IOException;
 
 /** Buckets builder to create buckets that use {@link HadoopPathBasedPartFileWriter}. */
+@Internal
 public class HadoopPathBasedBulkFormatBuilder<
                 IN, BucketID, T extends HadoopPathBasedBulkFormatBuilder<IN, BucketID, T>>
         extends StreamingFileSink.BucketsBuilder<IN, BucketID, T> {
diff --git a/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/SerializableConfiguration.java b/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/SerializableConfiguration.java
index f7235c92e78..2aa39280416 100644
--- a/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/SerializableConfiguration.java
+++ b/flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/SerializableConfiguration.java
@@ -17,6 +17,8 @@
 
 package org.apache.flink.streaming.api.functions.sink.filesystem;
 
+import org.apache.flink.annotation.Internal;
+
 import org.apache.hadoop.conf.Configuration;
 
 import java.io.IOException;
@@ -25,6 +27,7 @@ import java.io.ObjectOutputStream;
 import java.io.Serializable;
 
 /** Wrapper of hadoop Configuration to make it serializable. */
+@Internal
 public class SerializableConfiguration implements Serializable {
 
     private static final long serialVersionUID = 1L;
diff --git a/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java b/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java
index b0e1605d702..9e0cdb5261a 100644
--- a/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java
+++ b/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java
@@ -18,7 +18,6 @@
 
 package org.apache.flink.orc.shim;
 
-import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.orc.OrcFilters.Predicate;
 import org.apache.flink.orc.vector.HiveOrcBatchWrapper;
@@ -166,7 +165,6 @@ public class OrcShimV200 implements OrcShim<VectorizedRowBatch> {
         }
     }
 
-    @VisibleForTesting
     public static Tuple2<Long, Long> getOffsetAndLengthForSplit(
             long splitStart, long splitLength, List<StripeInformation> stripes) {
         long splitEnd = splitStart + splitLength;
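
For context, the "Architecture Violation" errors fixed above come from Flink's
ArchUnit-based architecture tests, which require public production classes to
declare their API stability with annotations such as @Internal. The following
is a minimal, self-contained sketch of that kind of check; the package under
test and the exact rule wording are illustrative assumptions, not Flink's
actual rule definitions (the real rules also accept @Public, @PublicEvolving
and @Experimental, for instance).

    import org.apache.flink.annotation.Internal;

    import com.tngtech.archunit.core.domain.JavaClasses;
    import com.tngtech.archunit.core.importer.ClassFileImporter;
    import com.tngtech.archunit.lang.ArchRule;

    import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.classes;

    public class ApiAnnotationRuleSketch {
        public static void main(String[] args) {
            // Import the production package touched by this commit.
            JavaClasses imported =
                    new ClassFileImporter()
                            .importPackages("org.apache.flink.api.java.hadoop");

            // Every public class must declare its API stability; in this
            // simplified sketch, only @Internal satisfies the rule.
            ArchRule rule =
                    classes().that().arePublic()
                            .should().beAnnotatedWith(Internal.class);

            // Throws an AssertionError listing each violating class.
            rule.check(imported);
        }
    }

A rule of this shape accounts for the annotations added in the diff above.
The @VisibleForTesting removal in OrcShimV200 presumably resolves a related
rule restricting where test-visible members may be accessed from.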
