This is an automated email from the ASF dual-hosted git repository.

vinoyang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new f9d2f66  [HUDI-622]: Remove VisibleForTesting annotation and import from code (#1343)
f9d2f66 is described below

commit f9d2f66dc16540e3e5c1cb1f7f23b4fca7c656c3
Author: Suneel Marthi <smar...@apache.org>
AuthorDate: Thu Feb 20 02:17:53 2020 -0500

    [HUDI-622]: Remove VisibleForTesting annotation and import from code (#1343)
    
    * HUDI:622: Remove VisibleForTesting annotation and import from code
---
 hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java    | 3 ---
 hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java    | 3 ---
 .../apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java    | 2 --
 .../src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java | 4 ----
 .../java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java    | 3 ---
 .../src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java       | 4 ----
 .../compact/strategy/BoundedPartitionAwareCompactionStrategy.java   | 3 ---
 .../apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java | 3 ---
 .../src/main/java/org/apache/hudi/metrics/HoodieMetrics.java        | 2 --
 .../src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java | 2 --
 .../java/org/apache/hudi/common/bloom/filter/InternalFilter.java    | 4 ++--
 .../java/org/apache/hudi/common/util/BufferedRandomAccessFile.java  | 2 +-
 hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java  | 2 --
 .../main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java | 3 ---
 .../src/main/java/org/apache/hudi/common/util/RocksDBDAO.java       | 2 --
 .../org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java     | 5 -----
 .../src/main/scala/org/apache/hudi/AvroConversionHelper.scala       | 6 +++---
 .../main/java/org/apache/hudi/utilities/HDFSParquetImporter.java    | 2 --
 18 files changed, 6 insertions(+), 49 deletions(-)

diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
index 9411782..fe0cc60 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
@@ -37,7 +37,6 @@ import org.apache.hudi.metrics.HoodieMetrics;
 import org.apache.hudi.table.HoodieTable;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -108,7 +107,6 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
    * @param startCleanTime Cleaner Instant Time
    * @return Cleaner Plan if generated
    */
-  @VisibleForTesting
   protected Option<HoodieCleanerPlan> scheduleClean(String startCleanTime) {
     // Create a Hoodie table which encapsulated the commits and files visible
     HoodieTable<T> table = HoodieTable.getHoodieTable(createMetaClient(true), config, jsc);
@@ -138,7 +136,6 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
    * @param table Hoodie Table
    * @param cleanInstant Cleaner Instant
    */
-  @VisibleForTesting
   protected HoodieCleanMetadata runClean(HoodieTable<T> table, HoodieInstant cleanInstant) {
     try {
       HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(table.getMetaClient(), cleanInstant);
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
index 23055da..931ca07 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
@@ -61,7 +61,6 @@ import org.apache.hudi.table.WorkloadProfile;
 import org.apache.hudi.table.WorkloadStat;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import org.apache.log4j.LogManager;
@@ -121,7 +120,6 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
     this(jsc, clientConfig, rollbackPending, HoodieIndex.createIndex(clientConfig, jsc));
   }
 
-  @VisibleForTesting
   HoodieWriteClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, boolean rollbackPending, HoodieIndex index) {
     this(jsc, clientConfig, rollbackPending, index, Option.empty());
   }
@@ -1113,7 +1111,6 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
    * @param inflightInstant Inflight Compaction Instant
    * @param table Hoodie Table
    */
-  @VisibleForTesting
   void rollbackInflightCompaction(HoodieInstant inflightInstant, HoodieTable table) throws IOException {
     table.rollback(jsc, inflightInstant, false);
     // Revert instant state file
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
index 8045846..17b7506 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
@@ -20,7 +20,6 @@ package org.apache.hudi.index.bloom;
 
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.hash.Hashing;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -152,7 +151,6 @@ public class BucketizedBloomCheckPartitioner extends Partitioner {
     return candidatePartitions.get(idx);
   }
 
-  @VisibleForTesting
   Map<String, List<Integer>> getFileGroupToPartitions() {
     return fileGroupToPartitions;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
index a6d46d8..22b4c3f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
@@ -32,7 +32,6 @@ import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.io.HoodieRangeInfoHandle;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.spark.Partitioner;
@@ -188,7 +187,6 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
   /**
    * Load all involved files as <Partition, filename> pair RDD.
    */
-  @VisibleForTesting
   List<Tuple2<String, BloomIndexFileInfo>> loadInvolvedFiles(List<String> partitions, final JavaSparkContext jsc,
                                                              final HoodieTable hoodieTable) {
 
@@ -262,7 +260,6 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
    * Sub-partition to ensure the records can be looked up against files & also prune file<=>record comparisons based on
    * recordKey ranges in the index info.
    */
-  @VisibleForTesting
   JavaRDD<Tuple2<String, HoodieKey>> explodeRecordRDDWithFileComparisons(
       final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo,
       JavaPairRDD<String, String> partitionRecordKeyPairRDD) {
@@ -289,7 +286,6 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
    * <p>
    * Make sure the parallelism is atleast the groupby parallelism for tagging location
    */
-  @VisibleForTesting
   JavaPairRDD<HoodieKey, HoodieRecordLocation> findMatchingFilesForRecordKeys(
       final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo,
       JavaPairRDD<String, String> partitionRecordKeyPairRDD, int shuffleParallelism, HoodieTable hoodieTable,
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
index ba8976b..3c6cc72 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
@@ -30,7 +30,6 @@ import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -59,7 +58,6 @@ public class HoodieGlobalBloomIndex<T extends HoodieRecordPayload> extends Hoodi
    * Load all involved files as <Partition, filename> pair RDD from all partitions in the table.
    */
   @Override
-  @VisibleForTesting
   List<Tuple2<String, BloomIndexFileInfo>> loadInvolvedFiles(List<String> partitions, final JavaSparkContext jsc,
                                                              final HoodieTable hoodieTable) {
     HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
@@ -83,7 +81,6 @@ public class HoodieGlobalBloomIndex<T extends HoodieRecordPayload> extends Hoodi
    */
 
   @Override
-  @VisibleForTesting
   JavaRDD<Tuple2<String, HoodieKey>> explodeRecordRDDWithFileComparisons(
       final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo,
       JavaPairRDD<String, String> partitionRecordKeyPairRDD) {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
index 12d352d..6d750cf 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
@@ -36,7 +36,6 @@ import org.apache.hudi.exception.HoodieIndexException;
 import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -114,7 +113,6 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
     this.hBaseIndexQPSResourceAllocator = createQPSResourceAllocator(this.config);
   }
 
-  @VisibleForTesting
   public HBaseIndexQPSResourceAllocator createQPSResourceAllocator(HoodieWriteConfig config) {
     try {
       LOG.info("createQPSResourceAllocator :" + config.getHBaseQPSResourceAllocatorClass());
@@ -387,7 +385,6 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
     }
   }
 
-  @VisibleForTesting
   public Tuple2<Long, Integer> getHBasePutAccessParallelism(final JavaRDD<WriteStatus> writeStatusRDD) {
     final JavaPairRDD<Long, Integer> insertOnlyWriteStatusRDD = writeStatusRDD
         .filter(w -> w.getStat().getNumInserts() > 0).mapToPair(w -> new Tuple2<>(w.getStat().getNumInserts(), 1));
@@ -497,7 +494,6 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
     return false;
   }
 
-  @VisibleForTesting
   public void setHbaseConnection(Connection hbaseConnection) {
     HBaseIndex.hbaseConnection = hbaseConnection;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
index 5b64ced..6ba8213 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
@@ -22,8 +22,6 @@ import org.apache.hudi.avro.model.HoodieCompactionOperation;
 import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.config.HoodieWriteConfig;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.Comparator;
@@ -68,7 +66,6 @@ public class BoundedPartitionAwareCompactionStrategy extends DayBasedCompactionS
         .filter(e -> comparator.compare(earliestPartitionPathToCompact, e) >= 0).collect(Collectors.toList());
   }
 
-  @VisibleForTesting
   public static Date getDateAtOffsetFromToday(int offset) {
     Calendar calendar = Calendar.getInstance();
     calendar.add(Calendar.DATE, offset);
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
index a491818..9d53776 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
@@ -23,8 +23,6 @@ import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Comparator;
@@ -55,7 +53,6 @@ public class DayBasedCompactionStrategy extends CompactionStrategy {
     }
   };
 
-  @VisibleForTesting
   public Comparator<String> getComparator() {
     return comparator;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
index b6fcd09..4b2b48b 100644
--- a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
+++ b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
@@ -23,7 +23,6 @@ import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.config.HoodieWriteConfig;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
@@ -187,7 +186,6 @@ public class HoodieMetrics {
     }
   }
 
-  @VisibleForTesting
   String getMetricsName(String action, String metric) {
     return config == null ? null : String.format("%s.%s.%s", tableName, action, metric);
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
index 754b0ac..2e43013 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
@@ -42,7 +42,6 @@ import org.apache.hudi.func.MergeOnReadLazyInsertIterable;
 import org.apache.hudi.io.HoodieAppendHandle;
 import org.apache.hudi.io.compact.HoodieMergeOnReadTableCompactor;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -409,7 +408,6 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi
     }
 
     // TODO (NA) : Make this static part of utility
-    @VisibleForTesting
     public long convertLogFilesSizeToExpectedParquetSize(List<HoodieLogFile> hoodieLogFiles) {
       long totalSizeOfLogFiles = hoodieLogFiles.stream().map(HoodieLogFile::getFileSize)
           .filter(size -> size > 0).reduce(Long::sum).orElse(0L);
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java b/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
index 228143b..c0a5028 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
@@ -145,8 +145,8 @@ abstract class InternalFilter implements Writable {
     if (keys == null) {
       throw new IllegalArgumentException("Key[] may not be null");
     }
-    for (int i = 0; i < keys.length; i++) {
-      add(keys[i]);
+    for (Key key : keys) {
+      add(key);
     }
   } //end add()
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java b/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
index 25fe7b0..15cf4c3 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
@@ -169,7 +169,7 @@ public final class BufferedRandomAccessFile extends RandomAccessFile {
   private int fillBuffer() throws IOException {
     int cnt = 0;
     int bytesToRead = this.capacity;
-    /** blocking read, until buffer is filled or EOF reached */
+    // blocking read, until buffer is filled or EOF reached
     while (bytesToRead > 0) {
       int n = super.read(this.dataBuffer.array(), cnt, bytesToRead);
       if (n < 0) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
index bbfe7b1..3cc7bf2 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
@@ -28,7 +28,6 @@ import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.exception.InvalidHoodiePathException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -216,7 +215,6 @@ public class FSUtils {
    * @param excludeMetaFolder Exclude .hoodie folder
    * @throws IOException
    */
-  @VisibleForTesting
   static void processFiles(FileSystem fs, String basePathStr, Function<FileStatus, Boolean> consumer,
       boolean excludeMetaFolder) throws IOException {
     PathFilter pathFilter = excludeMetaFolder ? getExcludeMetaPathFilter() : ALLOW_ALL_FILTER;
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java b/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
index d396000..1898a4e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
@@ -16,7 +16,6 @@
 
 package org.apache.hudi.common.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
@@ -252,7 +251,6 @@ public class ObjectSizeCalculator {
     size += objectSize;
   }
 
-  @VisibleForTesting
   static long roundTo(long x, int multiple) {
     return ((x + multiple - 1) / multiple) * multiple;
   }
@@ -325,7 +323,6 @@ public class ObjectSizeCalculator {
     throw new AssertionError("Encountered unexpected primitive type " + type.getName());
   }
 
-  @VisibleForTesting
   static MemoryLayoutSpecification getEffectiveMemoryLayoutSpecification() {
     final String vmName = System.getProperty("java.vm.name");
     if (vmName == null || !(vmName.startsWith("Java HotSpot(TM) ") || vmName.startsWith("OpenJDK")
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
index ec46af4..8884e9c 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
@@ -22,7 +22,6 @@ import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -450,7 +449,6 @@ public class RocksDBDAO {
     }
   }
 
-  @VisibleForTesting
   String getRocksDBBasePath() {
     return rocksDBBasePath;
   }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
index 7ea6d5e..356f000 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
@@ -23,7 +23,6 @@ import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.SizeEstimator;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -62,7 +61,6 @@ public class BoundedInMemoryQueue<I, O> implements Iterable<O> {
   // It indicates number of records to cache. We will be using sampled record's average size to
   // determine how many
   // records we should cache and will change (increase/decrease) permits 
accordingly.
-  @VisibleForTesting
   public final Semaphore rateLimiter = new Semaphore(1);
   // used for sampling records with "RECORD_SAMPLING_RATE" frequency.
   public final AtomicLong samplingRecordCounter = new AtomicLong(-1);
@@ -86,10 +84,8 @@ public class BoundedInMemoryQueue<I, O> implements Iterable<O> {
   private final QueueIterator iterator;
   // indicates rate limit (number of records to cache). it is updated whenever there is a change
   // in avg record size.
-  @VisibleForTesting
   public int currentRateLimit = 1;
   // indicates avg record size in bytes. It is updated whenever a new record is sampled.
-  @VisibleForTesting
   public long avgRecordSizeInBytes = 0;
   // indicates number of samples collected so far.
   private long numSamples = 0;
@@ -119,7 +115,6 @@ public class BoundedInMemoryQueue<I, O> implements Iterable<O> {
     this.iterator = new QueueIterator();
   }
 
-  @VisibleForTesting
   public int size() {
     return this.queue.size();
   }
diff --git a/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala b/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
index e1a7ae1..b61bef3 100644
--- a/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
+++ b/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
@@ -127,7 +127,7 @@ object AvroConversionHelper {
                   new Timestamp(item.asInstanceOf[Long])
                 case other =>
                   throw new IncompatibleSchemaException(
-                    s"Cannot convert Avro logical type ${other} to Catalyst 
Timestamp type.")
+                    s"Cannot convert Avro logical type $other to Catalyst 
Timestamp type.")
               }
             }
         case (struct: StructType, RECORD) =>
@@ -215,7 +215,7 @@ object AvroConversionHelper {
              createConverter(Schema.createUnion(remainingUnionTypes.asJava), sqlType, path)
             }
           } else avroSchema.getTypes.asScala.map(_.getType) match {
-            case Seq(t1) => createConverter(avroSchema.getTypes.get(0), sqlType, path)
+            case Seq(_) => createConverter(avroSchema.getTypes.get(0), sqlType, path)
             case Seq(a, b) if Set(a, b) == Set(INT, LONG) && sqlType == LongType =>
               (item: AnyRef) => {
                 item match {
@@ -286,7 +286,7 @@ object AvroConversionHelper {
       case ShortType => (item: Any) =>
         if (item == null) null else item.asInstanceOf[Short].intValue
       case dec: DecimalType => (item: Any) =>
-        Option(item).map { i =>
+        Option(item).map { _ =>
           val bigDecimalValue = item.asInstanceOf[java.math.BigDecimal]
           val decimalConversions = new DecimalConversion()
           decimalConversions.toFixed(bigDecimalValue, avroSchema.getField(structName).schema().getTypes.get(0),
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
index 218df22..c326814 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
@@ -35,7 +35,6 @@ import com.beust.jcommander.IValueValidator;
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.ParameterException;
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.hadoop.fs.FileSystem;
@@ -120,7 +119,6 @@ public class HDFSParquetImporter implements Serializable {
     return ret;
   }
 
-  @VisibleForTesting
   protected int dataImport(JavaSparkContext jsc) throws IOException {
     try {
       if (fs.exists(new Path(cfg.targetPath))) {
