This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 15c77c6e9b HDDS-9704. Eliminate dependency on spotbugs-annotations
(#5615)
15c77c6e9b is described below
commit 15c77c6e9bb22a0bbf5d0080f81c9310051229a9
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Fri Nov 17 10:19:54 2023 +0100
HDDS-9704. Eliminate dependency on spotbugs-annotations (#5615)
---
CONTRIBUTING.md | 12 --
.../client/dev-support/findbugsExcludeFile.xml | 4 +
hadoop-hdds/client/pom.xml | 6 -
.../hadoop/hdds/scm/storage/ByteArrayReader.java | 3 -
.../common/dev-support/findbugsExcludeFile.xml | 4 +
hadoop-hdds/common/pom.xml | 5 -
.../hdds/ratis/ServerNotLeaderException.java | 1 +
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 2 +
...estServerNotLeaderExceptionMessageParsing.java} | 5 +-
.../dev-support/findbugsExcludeFile.xml | 4 +-
hadoop-hdds/container-service/pom.xml | 5 -
.../container/common/volume/AbstractFuture.java | 2 -
.../container/keyvalue/TestKeyValueHandler.java | 4 +-
.../TestBackgroundContainerDataScanner.java | 3 -
.../TestBackgroundContainerMetadataScanner.java | 3 -
.../dev-support/findbugsExcludeFile.xml | 13 +-
hadoop-hdds/erasurecode/pom.xml | 15 +-
.../ozone/erasurecode/rawcoder/util/GF256.java | 2 -
hadoop-hdds/framework/pom.xml | 5 -
.../utils/db/TestRDBStoreByteArrayIterator.java | 2 -
hadoop-hdds/managed-rocksdb/pom.xml | 1 +
.../dev-support/findbugsExcludeFile.xml | 13 +-
hadoop-hdds/rocks-native/pom.xml | 16 +-
.../utils/db/managed/ManagedSSTDumpIterator.java | 3 -
hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 5 -
hadoop-hdds/server-scm/pom.xml | 6 -
.../container/replication/ReplicationManager.java | 2 -
.../hadoop/hdds/scm/server/SCMPolicyProvider.java | 24 ++-
hadoop-hdds/test-utils/pom.xml | 5 -
.../org/apache/ozone/test/LambdaTestUtils.java | 203 +--------------------
hadoop-ozone/common/pom.xml | 5 -
hadoop-ozone/insight/pom.xml | 5 -
hadoop-ozone/integration-test/pom.xml | 5 -
.../hadoop/ozone/om/TestOMDbCheckpointServlet.java | 13 +-
.../ozone/om/TestOmContainerLocationCache.java | 86 ++++-----
.../org/apache/hadoop/ozone/om/TestOmSnapshot.java | 2 -
.../dev-support/findbugsExcludeFile.xml | 8 +-
hadoop-ozone/ozone-manager/pom.xml | 5 -
.../apache/hadoop/ozone/om/OMPolicyProvider.java | 24 ++-
.../apache/hadoop/ozone/om/OmSnapshotMetrics.java | 25 +--
.../om/snapshot/FSODirectoryPathResolver.java | 7 +-
.../ozone/om/snapshot/SnapshotDiffManager.java | 4 +-
.../hadoop/ozone/om/TestAuthorizerLockImpl.java | 24 +--
.../hadoop/ozone/om/TestOmSnapshotManager.java | 2 -
.../request/volume/TestOMVolumeCreateRequest.java | 1 +
.../om/snapshot/TestFSODirectoryPathResolver.java | 2 -
.../ozone/om/snapshot/TestSnapshotDiffManager.java | 3 -
hadoop-ozone/ozonefs-common/pom.xml | 5 -
.../hadoop/fs/ozone/BasicOzoneFileSystem.java | 2 +-
.../fs/ozone/BasicRootedOzoneFileSystem.java | 2 +-
.../apache/hadoop/fs/ozone/FileStatusAdapter.java | 12 +-
.../recon/dev-support/findbugsExcludeFile.xml | 4 +
hadoop-ozone/recon/pom.xml | 5 -
.../ozone/recon/scm/ReconPolicyProvider.java | 24 ++-
.../ozone/recon/tasks/TestNSSummaryTask.java | 12 +-
.../recon/tasks/TestNSSummaryTaskWithFSO.java | 20 +-
.../recon/tasks/TestNSSummaryTaskWithLegacy.java | 20 +-
hadoop-ozone/s3gateway/pom.xml | 5 -
.../hadoop/ozone/s3/endpoint/BucketEndpoint.java | 2 -
hadoop-ozone/tools/pom.xml | 5 -
.../ozone/audit/parser/common/DatabaseHelper.java | 2 -
.../freon/OzoneClientKeyReadWriteListOps.java | 74 ++++----
.../hadoop/ozone/freon/RandomKeyGenerator.java | 2 -
pom.xml | 6 -
64 files changed, 210 insertions(+), 591 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 131591783b..649b392f78 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -159,18 +159,6 @@ Some scripts require third-party tools, but most of these
are installed during t
Most scripts (except `build.sh`) output results in `target/<name>`, e.g.
`target/docs`.
-### False positive findbugs violation
-
-If you have __very good__ reasons, you can ignore any Fingbugs warning. Your
good reason can be persisted with the `@SuppressFBWarnings` annotation.
-
-```java
-@SuppressFBWarnings(value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
- justification="The method is synchronized and this is the only place "+
- "dnsToUuidMap is modified")
-private synchronized void addEntryTodnsToUuidMap(
-...
-```
-
## Using IDE
As Ozone uses Apache Maven it can be developed from any IDE. IntelliJ IDEA is
a common choice, here are some suggestions to use it for Ozone development.
diff --git a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
index 57d28c4ce1..7b874905bf 100644
--- a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
@@ -16,6 +16,10 @@
-->
<FindBugsFilter>
<!-- Test -->
+ <Match>
+ <Class name="org.apache.hadoop.hdds.scm.storage.ByteArrayReader"></Class>
+ <Bug pattern="EI_EXPOSE_REP2" /> <!-- "Deep copy byte[] has bad impact on
performance" -->
+ </Match>
<Match>
<Class name="org.apache.hadoop.hdds.scm.storage.TestBufferPool"></Class>
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index fd44ffc243..8dfe669a7e 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -64,12 +64,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>slf4j-reload4j</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <version>${spotbugs.version}</version>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
index a1c30526ee..5f05628995 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
@@ -23,7 +23,6 @@ import java.io.IOException;
import java.io.InputStream;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* An {@link ByteReaderStrategy} implementation which supports byte[] as the
@@ -35,8 +34,6 @@ public class ByteArrayReader implements ByteReaderStrategy {
private int offset;
private int targetLen;
- @SuppressFBWarnings(value = "EI_EXPOSE_REP2",
- justification = "Deep copy byte[] has bad impact on performance")
public ByteArrayReader(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
index f7fb8591ef..ec4cd0ac5a 100644
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
@@ -22,6 +22,10 @@
<Class name="org.apache.hadoop.hdds.cli.GenericCli"></Class>
<Bug pattern="DM_EXIT" />
</Match>
+ <Match>
+ <Class name="org.apache.hadoop.ozone.OzoneConsts"/>
+ <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
+ </Match>
<Match>
<Class
name="org.apache.hadoop.ozone.common.ChecksumByteBuffer$CrcIntTable" />
<Method name="update" />
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index f94ddc8cee..1195ce5552 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -223,11 +223,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<scope>test</scope>
<!-- Needed for mocking RaftServerImpl -->
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-api</artifactId>
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
index 2cc6b07907..5a1e2864b5 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
@@ -51,6 +51,7 @@ public class ServerNotLeaderException extends IOException {
this.leader = suggestedLeader;
}
+ // required for creation by RemoteException#unwrapRemoteException
public ServerNotLeaderException(String message) {
super(message);
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 47610e29a3..01fa7c8a52 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -25,6 +25,7 @@ import org.apache.ratis.thirdparty.io.grpc.Metadata;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.Pattern;
@@ -101,6 +102,7 @@ public final class OzoneConsts {
public static final String OZONE_HTTP_SCHEME = "http";
public static final String OZONE_URI_DELIMITER = "/";
public static final String OZONE_ROOT = OZONE_URI_DELIMITER;
+ public static final Path ROOT_PATH = Paths.get(OZONE_ROOT);
public static final String CONTAINER_EXTENSION = ".container";
diff --git
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderException.java
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
similarity index 95%
rename from
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderException.java
rename to
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
index 7d44d060fd..05ad970057 100644
---
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderException.java
+++
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
@@ -18,14 +18,11 @@
package org.apache.hadoop.hdds.ratis;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/** Class to test {@link ServerNotLeaderException} parsing. **/
-
-@SuppressFBWarnings("NM_CLASS_NOT_EXCEPTION")
-public class TestServerNotLeaderException {
+class TestServerNotLeaderExceptionMessageParsing {
@Test
public void testServerNotLeaderException() {
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
index be2e2df5f1..0791ffb9ea 100644
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -87,10 +87,10 @@
</Match>
<Match>
<Class
name="org.apache.hadoop.ozone.container.ozoneimpl.TestBackgroundContainerDataScanner"/>
- <Bug pattern="RU_INVOKE_RUN" />
+ <Bug pattern="RU_INVOKE_RUN, RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
</Match>
<Match>
<Class
name="org.apache.hadoop.ozone.container.ozoneimpl.TestBackgroundContainerMetadataScanner"/>
- <Bug pattern="RU_INVOKE_RUN" />
+ <Bug pattern="RU_INVOKE_RUN, RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
</Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/container-service/pom.xml
b/hadoop-hdds/container-service/pom.xml
index 1144b6018a..ec17b4e331 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -86,11 +86,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
index a148513a36..1e0d2ecd3a 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -29,7 +29,6 @@ import
com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
.newUpdater;
@@ -163,7 +162,6 @@ public abstract class AbstractFuture<V> implements
ListenableFuture<V> {
// Prevent rare disastrous classloading in first call to LockSupport.park.
// See: https://bugs.openjdk.java.net/browse/JDK-8074773
@SuppressWarnings("unused")
- @SuppressFBWarnings
Class<?> ensureLoaded = LockSupport.class;
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index b1cd976b40..bfaf7b1fca 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -26,7 +26,6 @@ import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
@@ -92,7 +91,7 @@ public class TestKeyValueHandler {
private static final String DATANODE_UUID = UUID.randomUUID().toString();
private static final long DUMMY_CONTAINER_ID = 9999;
- private static final String DUMMY_PATH = "/dummy/dir/doesnt/exist";
+ private static final String DUMMY_PATH = "dummy/dir/doesnt/exist";
private final ContainerLayoutVersion layout;
@@ -349,7 +348,6 @@ public class TestKeyValueHandler {
ContainerProtos.Result.INVALID_CONTAINER_STATE, response.getResult());
}
- @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
@Test
public void testDeleteContainer() throws IOException {
final String testDir = tempDir.newFolder().getAbsolutePath();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
index 4b37df0e14..0640606bee 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
@@ -19,7 +19,6 @@
*/
package org.apache.hadoop.ozone.container.ozoneimpl;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -192,8 +191,6 @@ public class TestBackgroundContainerDataScanner extends
*/
@Test
@Override
- // Override findbugs warning about Mockito.verify
- @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public void testWithVolumeFailure() throws Exception {
Mockito.when(vol.isFailed()).thenReturn(true);
// Run the scanner thread in the background. It should be terminated on
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
index 81f6ba3d40..aa7246ad7f 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
@@ -19,7 +19,6 @@
*/
package org.apache.hadoop.ozone.container.ozoneimpl;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -175,8 +174,6 @@ public class TestBackgroundContainerMetadataScanner extends
*/
@Test
@Override
- // Override findbugs warning about Mockito.verify
- @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public void testWithVolumeFailure() throws Exception {
Mockito.when(vol.isFailed()).thenReturn(true);
diff --git a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
b/hadoop-hdds/erasurecode/dev-support/findbugsExcludeFile.xml
similarity index 67%
copy from hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
copy to hadoop-hdds/erasurecode/dev-support/findbugsExcludeFile.xml
index 57d28c4ce1..75b376f008 100644
--- a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/erasurecode/dev-support/findbugsExcludeFile.xml
@@ -15,17 +15,8 @@
limitations under the License.
-->
<FindBugsFilter>
- <!-- Test -->
<Match>
- <Class name="org.apache.hadoop.hdds.scm.storage.TestBufferPool"></Class>
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class
name="org.apache.hadoop.hdds.scm.storage.TestChunkInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
- </Match>
- <Match>
- <Class
name="org.apache.hadoop.hdds.scm.storage.TestBlockInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
+ <Class name="org.apache.ozone.erasurecode.rawcoder.util.GF256"></Class>
+ <Bug pattern="MS_EXPOSE_REP" />
</Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml
index 494559fec5..1654236954 100644
--- a/hadoop-hdds/erasurecode/pom.xml
+++ b/hadoop-hdds/erasurecode/pom.xml
@@ -44,14 +44,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hdds-test-utils</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
</dependencies>
<build>
-
+ <plugins>
+ <plugin>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-maven-plugin</artifactId>
+ <configuration>
+
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+ </configuration>
+ </plugin>
+ </plugins>
</build>
</project>
diff --git
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
index a75dd59dc4..f0cd02beb2 100644
---
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
+++
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
@@ -17,7 +17,6 @@
*/
package org.apache.ozone.erasurecode.rawcoder.util;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
/**
@@ -139,7 +138,6 @@ public final class GF256 {
(byte) 0xaf
};
- @SuppressFBWarnings("MS_EXPOSE_REP")
private static byte[][] theGfMulTab; // multiply result table in GF 256 space
/**
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 285e857d28..11e47c1169 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -164,11 +164,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
index 223b82c353..4efeb2c590 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
@@ -18,7 +18,6 @@
*/
package org.apache.hadoop.hdds.utils.db;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.log4j.Level;
@@ -307,7 +306,6 @@ public class TestRDBStoreByteArrayIterator {
}
@Test
- @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public void testGetStackTrace() {
ManagedRocksIterator iterator = mock(ManagedRocksIterator.class);
RocksIterator mock = mock(RocksIterator.class);
diff --git a/hadoop-hdds/managed-rocksdb/pom.xml
b/hadoop-hdds/managed-rocksdb/pom.xml
index 573d5c0f96..f27643a482 100644
--- a/hadoop-hdds/managed-rocksdb/pom.xml
+++ b/hadoop-hdds/managed-rocksdb/pom.xml
@@ -43,4 +43,5 @@
</dependencies>
<build/>
+
</project>
diff --git a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
b/hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml
similarity index 67%
copy from hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
copy to hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml
index 57d28c4ce1..9d03a7b7a6 100644
--- a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml
@@ -15,17 +15,8 @@
limitations under the License.
-->
<FindBugsFilter>
- <!-- Test -->
<Match>
- <Class name="org.apache.hadoop.hdds.scm.storage.TestBufferPool"></Class>
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class
name="org.apache.hadoop.hdds.scm.storage.TestChunkInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
- </Match>
- <Match>
- <Class
name="org.apache.hadoop.hdds.scm.storage.TestBlockInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
+ <Class
name="org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpIterator$KeyValue"/>
+ <Bug pattern="EI_EXPOSE_REP" />
</Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml
index e40a95d007..dcd8aad8a4 100644
--- a/hadoop-hdds/rocks-native/pom.xml
+++ b/hadoop-hdds/rocks-native/pom.xml
@@ -37,11 +37,6 @@
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-io</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
@@ -70,6 +65,17 @@
<maven.compiler.target>8</maven.compiler.target>
</properties>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-maven-plugin</artifactId>
+ <configuration>
+
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
<profiles>
<profile>
<id>cpu-count</id>
diff --git
a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
index 950bfa5979..38358df9f6 100644
---
a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
+++
b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.utils.db.managed;
import com.google.common.collect.Maps;
import com.google.common.primitives.UnsignedLong;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.util.ClosableIterator;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
@@ -288,7 +287,6 @@ public abstract class ManagedSSTDumpIterator<T> implements
ClosableIterator<T> {
this.value = value;
}
- @SuppressFBWarnings("EI_EXPOSE_REP")
public byte[] getKey() {
return key;
}
@@ -301,7 +299,6 @@ public abstract class ManagedSSTDumpIterator<T> implements
ClosableIterator<T> {
return type;
}
- @SuppressFBWarnings("EI_EXPOSE_REP")
public byte[] getValue() {
return value;
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index 6e8f8df049..ee7db36291 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -50,11 +50,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index caab32f301..f121c41982 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -67,12 +67,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>metrics-core</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
-
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index ff804e40f3..34b0183ffc 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hdds.scm.container.replication;
import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -1409,7 +1408,6 @@ public class ReplicationManager implements SCMService {
return ReplicationManager.class.getSimpleName();
}
- @SuppressFBWarnings("IS2_INCONSISTENT_SYNC")
public ReplicationManagerMetrics getMetrics() {
return metrics;
}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
index ce0d5e25eb..0ea2d0e955 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.server;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
@@ -31,8 +30,11 @@ import
org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
+import org.apache.ratis.util.MemoizedSupplier;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Supplier;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL;
@@ -49,8 +51,8 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SEC
@InterfaceStability.Unstable
public final class SCMPolicyProvider extends PolicyProvider {
- private static AtomicReference<SCMPolicyProvider> atomicReference =
- new AtomicReference<>();
+ private static final Supplier<SCMPolicyProvider> SUPPLIER =
+ MemoizedSupplier.valueOf(SCMPolicyProvider::new);
private SCMPolicyProvider() {
}
@@ -58,14 +60,11 @@ public final class SCMPolicyProvider extends PolicyProvider
{
@Private
@Unstable
public static SCMPolicyProvider getInstance() {
- if (atomicReference.get() == null) {
- atomicReference.compareAndSet(null, new SCMPolicyProvider());
- }
- return atomicReference.get();
+ return SUPPLIER.get();
}
- private static final Service[] SCM_SERVICES =
- new Service[]{
+ private static final List<Service> SCM_SERVICES =
+ Arrays.asList(
new Service(
HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
StorageContainerDatanodeProtocol.class),
@@ -87,12 +86,11 @@ public final class SCMPolicyProvider extends PolicyProvider
{
new Service(
HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL,
SecretKeyProtocolDatanode.class)
- };
+ );
- @SuppressFBWarnings("EI_EXPOSE_REP")
@Override
public Service[] getServices() {
- return SCM_SERVICES;
+ return SCM_SERVICES.toArray(new Service[0]);
}
}
diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml
index e9c8f2b454..ae63b508b8 100644
--- a/hadoop-hdds/test-utils/pom.xml
+++ b/hadoop-hdds/test-utils/pom.xml
@@ -33,11 +33,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</properties>
<dependencies>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
diff --git
a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
index ce732cf550..661989dade 100644
---
a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
+++
b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -19,7 +19,6 @@
package org.apache.ozone.test;
import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -124,7 +123,7 @@ public final class LambdaTestUtils {
| VirtualMachineError e) {
throw e;
} catch (Throwable e) {
- LOG.debug("eventually() iteration {}", iterations, e);
+ LOG.debug("await() iteration {}", iterations, e);
ex = e;
}
running = System.currentTimeMillis() < endTime;
@@ -183,73 +182,6 @@ public final class LambdaTestUtils {
new GenerateTimeout());
}
- /**
- * Repeatedly execute a closure until it returns a value rather than
- * raise an exception.
- * Exceptions are caught and, with one exception,
- * trigger a sleep and retry. This is similar of ScalaTest's
- * {@code eventually(timeout, closure)} operation, though that lacks
- * the ability to fail fast if the inner closure has determined that
- * a failure condition is non-recoverable.
- * <p>
- * Example: spin until an the number of files in a filesystem is non-zero,
- * returning the files found.
- * The sleep interval backs off by 500 ms each iteration to a maximum of 5s.
- * <pre>
- * FileStatus[] files = eventually( 30 * 1000,
- * () -> {
- * FileStatus[] f = filesystem.listFiles(new Path("/"));
- * assertEquals(0, f.length);
- * return f;
- * },
- * new ProportionalRetryInterval(500, 5000));
- * </pre>
- * This allows for a fast exit, yet reduces probe frequency over time.
- *
- * @param <T> return type
- * @param timeoutMillis timeout in milliseconds.
- * Can be zero, in which case only one attempt is made before failing.
- * @param eval expression to evaluate
- * @param retry retry interval generator
- * @return result of the first successful eval call
- * @throws Exception the last exception thrown before timeout was triggered
- * @throws FailFastException if raised -without any retry attempt.
- * @throws InterruptedException if interrupted during the sleep operation.
- * @throws OutOfMemoryError you've run out of memory.
- */
- @SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
- public static <T> T eventually(int timeoutMillis,
- Callable<T> eval,
- Callable<Integer> retry) throws Exception {
- Preconditions.checkArgument(timeoutMillis >= 0,
- "timeoutMillis must be >= 0");
- final long endTime = System.currentTimeMillis() + timeoutMillis;
- Throwable ex;
- boolean running;
- int iterations = 0;
- do {
- iterations++;
- try {
- return eval.call();
- } catch (InterruptedException
- | FailFastException
- | VirtualMachineError e) {
- // these two exceptions trigger an immediate exit
- throw e;
- } catch (Throwable e) {
- LOG.debug("evaluate() iteration {}", iterations, e);
- ex = e;
- running = System.currentTimeMillis() < endTime;
- int sleeptime = retry.call();
- if (running && sleeptime >= 0) {
- Thread.sleep(sleeptime);
- }
- }
- } while (running);
- // timeout. Throw the last exception raised
- return raise(ex);
- }
-
/**
* Take the throwable and raise it as an exception or an error, depending
* upon its type. This allows callers to declare that they only throw
@@ -269,70 +201,6 @@ public final class LambdaTestUtils {
}
}
- /**
- * Variant of {@link #eventually(int, Callable, Callable)} method for
- * void lambda expressions.
- * @param timeoutMillis timeout in milliseconds.
- * Can be zero, in which case only one attempt is made before failing.
- * @param eval expression to evaluate
- * @param retry retry interval generator
- * @throws Exception the last exception thrown before timeout was triggered
- * @throws FailFastException if raised -without any retry attempt.
- * @throws InterruptedException if interrupted during the sleep operation.
- */
- public static void eventually(int timeoutMillis,
- VoidCallable eval,
- Callable<Integer> retry) throws Exception {
- eventually(timeoutMillis, new VoidCaller(eval), retry);
- }
-
- /**
- * Simplified {@link #eventually(int, Callable, Callable)} method
- * with a fixed interval.
- * <p>
- * Example: wait 30s until an assertion holds, sleeping 1s between each
- * check.
- * <pre>
- * eventually( 30 * 1000, 1000,
- * () -> { assertEquals(0, filesystem.listFiles(new Path("/")).length); }
- * );
- * </pre>
- *
- * @param timeoutMillis timeout in milliseconds.
- * Can be zero, in which case only one attempt is made before failing.
- * @param intervalMillis interval in milliseconds
- * @param eval expression to evaluate
- * @return result of the first successful invocation of {@code eval()}
- * @throws Exception the last exception thrown before timeout was triggered
- * @throws FailFastException if raised -without any retry attempt.
- * @throws InterruptedException if interrupted during the sleep operation.
- */
- public static <T> T eventually(int timeoutMillis,
- int intervalMillis,
- Callable<T> eval) throws Exception {
- return eventually(timeoutMillis, eval,
- new FixedRetryInterval(intervalMillis));
- }
-
- /**
- /**
- * Variant of {@link #eventually(int, int, Callable)} method for
- * void lambda expressions.
- * @param timeoutMillis timeout in milliseconds.
- * Can be zero, in which case only one attempt is made before failing.
- * @param intervalMillis interval in milliseconds
- * @param eval expression to evaluate
- * @throws Exception the last exception thrown before timeout was triggered
- * @throws FailFastException if raised -without any retry attempt.
- * @throws InterruptedException if interrupted during the sleep operation.
- */
- public static void eventually(int timeoutMillis,
- int intervalMillis,
- VoidCallable eval) throws Exception {
- eventually(timeoutMillis, eval,
- new FixedRetryInterval(intervalMillis));
- }
-
/**
* Robust string converter for exception messages; if the {@code toString()}
* method throws an exception then that exception is caught and logged,
@@ -377,7 +245,6 @@ public final class LambdaTestUtils {
* Invoke a callable; wrap all checked exceptions with an
* AssertionError.
* @param closure closure to execute
- * @return the value of the closure
* @throws AssertionError if the operation raised an IOE or
* other checked exception.
*/
@@ -414,8 +281,7 @@ public final class LambdaTestUtils {
* @return TimeoutException
*/
@Override
- public Throwable evaluate(int timeoutMillis, Throwable caught)
- throws Throwable {
+ public Throwable evaluate(int timeoutMillis, Throwable caught) {
String s = String.format("%s: after %d millis", message,
timeoutMillis);
String caughtText = caught != null
@@ -459,55 +325,8 @@ public final class LambdaTestUtils {
}
}
- /**
- * Gradually increase the sleep time by the initial interval, until
- * the limit set by {@code maxIntervalMillis} is reached.
- */
- public static class ProportionalRetryInterval implements Callable<Integer> {
- private final int intervalMillis;
- private final int maxIntervalMillis;
- private int current;
- private int invocationCount = 0;
-
- public ProportionalRetryInterval(int intervalMillis,
- int maxIntervalMillis) {
- Preconditions.checkArgument(intervalMillis > 0);
- Preconditions.checkArgument(maxIntervalMillis > 0);
- this.intervalMillis = intervalMillis;
- this.current = intervalMillis;
- this.maxIntervalMillis = maxIntervalMillis;
- }
-
- @Override
- public Integer call() throws Exception {
- invocationCount++;
- int last = current;
- if (last < maxIntervalMillis) {
- current += intervalMillis;
- }
- return last;
- }
-
- public int getInvocationCount() {
- return invocationCount;
- }
-
- @Override
- public String toString() {
- final StringBuilder sb = new StringBuilder(
- "ProportionalRetryInterval{");
- sb.append("interval=").append(intervalMillis);
- sb.append(", current=").append(current);
- sb.append(", limit=").append(maxIntervalMillis);
- sb.append(", invocationCount=").append(invocationCount);
- sb.append('}');
- return sb.toString();
- }
- }
-
/**
* An exception which triggers a fast exist from the
- * {@link #eventually(int, Callable, Callable)} and
* {@link #await(int, Callable, Callable, TimeoutHandler)} loops.
*/
public static class FailFastException extends Exception {
@@ -540,22 +359,4 @@ public final class LambdaTestUtils {
void call() throws Exception;
}
- /**
- * Bridge class to make {@link VoidCallable} something to use in anything
- * which takes an {@link Callable}.
- */
- public static class VoidCaller implements Callable<Void> {
- private final VoidCallable callback;
-
- public VoidCaller(VoidCallable callback) {
- this.callback = callback;
- }
-
- @Override
- public Void call() throws Exception {
- callback.call();
- return null;
- }
- }
-
}
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index e5448f6c39..d074c36060 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -102,11 +102,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hdds-hadoop-dependency-test</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
index 314177aa71..3524dc3c29 100644
--- a/hadoop-ozone/insight/pom.xml
+++ b/hadoop-ozone/insight/pom.xml
@@ -78,11 +78,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
diff --git a/hadoop-ozone/integration-test/pom.xml
b/hadoop-ozone/integration-test/pom.xml
index 6e202d0267..0a35660a39 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -277,11 +277,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.slf4j</groupId>
<artifactId>jul-to-slf4j</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>test</scope>
- </dependency>
</dependencies>
<build>
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index 2f5dce0a74..d140e0aeaf 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -54,7 +54,6 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.collect.Sets;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -786,7 +785,6 @@ public class TestOMDbCheckpointServlet {
}
// Get all files below path, recursively, (skipping fabricated files).
- @SuppressFBWarnings({"NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE"})
private Set<String> getFiles(Path path, int truncateLength,
Set<String> fileSet) throws IOException {
try (Stream<Path> files = Files.list(path)) {
@@ -794,7 +792,7 @@ public class TestOMDbCheckpointServlet {
if (file.toFile().isDirectory()) {
getFiles(file, truncateLength, fileSet);
}
- String filename = file.getFileName().toString();
+ String filename = String.valueOf(file.getFileName());
if (!filename.startsWith("fabricated") &&
!filename.startsWith(OZONE_RATIS_SNAPSHOT_COMPLETE_FLAG_NAME)) {
fileSet.add(truncateFileName(truncateLength, file));
@@ -816,7 +814,6 @@ public class TestOMDbCheckpointServlet {
* @param lines Text lines defining the link paths.
* @param testDirName Name of test directory.
*/
- @SuppressFBWarnings({"NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE"})
private void checkFabricatedLines(Set<String> directories, List<String> lines,
String testDirName) {
// find the real file
@@ -845,9 +842,11 @@ public class TestOMDbCheckpointServlet {
"fabricated entry contains correct real directory: " + line);
Path path0 = Paths.get(files[0]);
Path path1 = Paths.get(files[1]);
- Assertions.assertTrue(
- path0.getFileName().toString().equals(FABRICATED_FILE_NAME) &&
- path1.getFileName().toString().equals(FABRICATED_FILE_NAME),
+ Assertions.assertEquals(FABRICATED_FILE_NAME,
+ String.valueOf(path0.getFileName()),
+ "fabricated entries contains correct file name: " + line);
+ Assertions.assertEquals(FABRICATED_FILE_NAME,
+ String.valueOf(path1.getFileName()),
"fabricated entries contains correct file name: " + line);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index f7d0b05054..2c9506d530 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.om;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -97,6 +96,7 @@ import java.util.List;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import static com.google.common.collect.Sets.newHashSet;
@@ -147,7 +147,7 @@ public class TestOmContainerLocationCache {
MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
private static final DatanodeDetails DN2 =
MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
- private static long testContainerId = 1L;
+ private static final AtomicLong CONTAINER_ID = new AtomicLong(1);
@BeforeAll
@@ -235,9 +235,8 @@ public class TestOmContainerLocationCache {
}
@BeforeEach
- @SuppressFBWarnings("ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD")
public void beforeEach() {
- testContainerId++;
+ CONTAINER_ID.getAndIncrement();
Mockito.reset(mockScmBlockLocationProtocol, mockScmContainerClient,
mockDn1Protocol, mockDn2Protocol);
when(mockDn1Protocol.getPipeline()).thenReturn(createPipeline(DN1));
@@ -252,9 +251,9 @@ public class TestOmContainerLocationCache {
public void containerCachedInHappyCase() throws Exception {
byte[] data = "Test content".getBytes(UTF_8);
- mockScmAllocationOnDn1(testContainerId, 1L);
+ mockScmAllocationOnDn1(CONTAINER_ID.get(), 1L);
mockWriteChunkResponse(mockDn1Protocol);
- mockPutBlockResponse(mockDn1Protocol, testContainerId, 1L, data);
+ mockPutBlockResponse(mockDn1Protocol, CONTAINER_ID.get(), 1L, data);
OzoneBucket bucket = objectStore.getVolume(VOLUME_NAME)
.getBucket(BUCKET_NAME);
@@ -265,15 +264,15 @@ public class TestOmContainerLocationCache {
IOUtils.write(data, os);
}
- mockScmGetContainerPipeline(testContainerId, DN1);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN1);
// Read keyName1.
OzoneKeyDetails key1 = bucket.getKey(keyName1);
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
- mockGetBlock(mockDn1Protocol, testContainerId, 1L, data, null, null);
- mockReadChunk(mockDn1Protocol, testContainerId, 1L, data, null, null);
+ mockGetBlock(mockDn1Protocol, CONTAINER_ID.get(), 1L, data, null, null);
+ mockReadChunk(mockDn1Protocol, CONTAINER_ID.get(), 1L, data, null, null);
try (InputStream is = key1.getContent()) {
byte[] read = new byte[(int) key1.getDataSize()];
IOUtils.read(is, read);
@@ -294,7 +293,7 @@ public class TestOmContainerLocationCache {
}
// Ensure SCM is not called once again.
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
}
private static Stream<Arguments> errorsTriggerRefresh() {
@@ -326,9 +325,9 @@ public class TestOmContainerLocationCache {
Exception dnException, Result dnResponseCode) throws Exception {
byte[] data = "Test content".getBytes(UTF_8);
- mockScmAllocationOnDn1(testContainerId, 1L);
+ mockScmAllocationOnDn1(CONTAINER_ID.get(), 1L);
mockWriteChunkResponse(mockDn1Protocol);
- mockPutBlockResponse(mockDn1Protocol, testContainerId, 1L, data);
+ mockPutBlockResponse(mockDn1Protocol, CONTAINER_ID.get(), 1L, data);
OzoneBucket bucket = objectStore.getVolume(VOLUME_NAME)
.getBucket(BUCKET_NAME);
@@ -338,20 +337,20 @@ public class TestOmContainerLocationCache {
IOUtils.write(data, os);
}
- mockScmGetContainerPipeline(testContainerId, DN1);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN1);
OzoneKeyDetails key1 = bucket.getKey(keyName);
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
try (InputStream is = key1.getContent()) {
// Simulate dn1 got errors, and the container's moved to dn2.
- mockGetBlock(mockDn1Protocol, testContainerId, 1L, null,
+ mockGetBlock(mockDn1Protocol, CONTAINER_ID.get(), 1L, null,
dnException, dnResponseCode);
- mockScmGetContainerPipeline(testContainerId, DN2);
- mockGetBlock(mockDn2Protocol, testContainerId, 1L, data, null, null);
- mockReadChunk(mockDn2Protocol, testContainerId, 1L, data, null, null);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN2);
+ mockGetBlock(mockDn2Protocol, CONTAINER_ID.get(), 1L, data, null, null);
+ mockReadChunk(mockDn2Protocol, CONTAINER_ID.get(), 1L, data, null, null);
byte[] read = new byte[(int) key1.getDataSize()];
IOUtils.read(is, read);
@@ -360,7 +359,7 @@ public class TestOmContainerLocationCache {
// verify SCM is called one more time to refresh.
verify(mockScmContainerClient, times(2))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
}
/**
@@ -374,9 +373,9 @@ public class TestOmContainerLocationCache {
Exception dnException, Result dnResponseCode) throws Exception {
byte[] data = "Test content".getBytes(UTF_8);
- mockScmAllocationOnDn1(testContainerId, 1L);
+ mockScmAllocationOnDn1(CONTAINER_ID.get(), 1L);
mockWriteChunkResponse(mockDn1Protocol);
- mockPutBlockResponse(mockDn1Protocol, testContainerId, 1L, data);
+ mockPutBlockResponse(mockDn1Protocol, CONTAINER_ID.get(), 1L, data);
OzoneBucket bucket = objectStore.getVolume(VOLUME_NAME)
.getBucket(BUCKET_NAME);
@@ -386,21 +385,21 @@ public class TestOmContainerLocationCache {
IOUtils.write(data, os);
}
- mockScmGetContainerPipeline(testContainerId, DN1);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN1);
OzoneKeyDetails key1 = bucket.getKey(keyName);
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
try (InputStream is = key1.getContent()) {
// simulate dn1 goes down, the container's to dn2.
- mockGetBlock(mockDn1Protocol, testContainerId, 1L, data, null, null);
- mockReadChunk(mockDn1Protocol, testContainerId, 1L, null,
+ mockGetBlock(mockDn1Protocol, CONTAINER_ID.get(), 1L, data, null, null);
+ mockReadChunk(mockDn1Protocol, CONTAINER_ID.get(), 1L, null,
dnException, dnResponseCode);
- mockScmGetContainerPipeline(testContainerId, DN2);
- mockGetBlock(mockDn2Protocol, testContainerId, 1L, data, null, null);
- mockReadChunk(mockDn2Protocol, testContainerId, 1L, data, null, null);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN2);
+ mockGetBlock(mockDn2Protocol, CONTAINER_ID.get(), 1L, data, null, null);
+ mockReadChunk(mockDn2Protocol, CONTAINER_ID.get(), 1L, data, null, null);
byte[] read = new byte[(int) key1.getDataSize()];
IOUtils.read(is, read);
@@ -409,7 +408,7 @@ public class TestOmContainerLocationCache {
// verify SCM is called one more time to refresh.
verify(mockScmContainerClient, times(2))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
}
/**
@@ -423,9 +422,9 @@ public class TestOmContainerLocationCache {
throws Exception {
byte[] data = "Test content".getBytes(UTF_8);
- mockScmAllocationOnDn1(testContainerId, 1L);
+ mockScmAllocationOnDn1(CONTAINER_ID.get(), 1L);
mockWriteChunkResponse(mockDn1Protocol);
- mockPutBlockResponse(mockDn1Protocol, testContainerId, 1L, data);
+ mockPutBlockResponse(mockDn1Protocol, CONTAINER_ID.get(), 1L, data);
OzoneBucket bucket = objectStore.getVolume(VOLUME_NAME)
.getBucket(BUCKET_NAME);
@@ -435,16 +434,17 @@ public class TestOmContainerLocationCache {
IOUtils.write(data, os);
}
- mockScmGetContainerPipeline(testContainerId, DN1);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN1);
OzoneKeyDetails key1 = bucket.getKey(keyName);
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
try (InputStream is = key1.getContent()) {
// simulate dn1 got errors, and the container's moved to dn2.
- mockGetBlock(mockDn1Protocol, testContainerId, 1L, null, ex, errorCode);
+ mockGetBlock(mockDn1Protocol, CONTAINER_ID.get(), 1L, null, ex,
+ errorCode);
assertThrows(expectedEx,
() -> IOUtils.read(is, new byte[(int) key1.getDataSize()]));
@@ -452,7 +452,7 @@ public class TestOmContainerLocationCache {
// verify SCM is called one more time to refresh.
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
}
/**
@@ -466,9 +466,9 @@ public class TestOmContainerLocationCache {
Class<? extends Exception> expectedEx) throws Exception {
byte[] data = "Test content".getBytes(UTF_8);
- mockScmAllocationOnDn1(testContainerId, 1L);
+ mockScmAllocationOnDn1(CONTAINER_ID.get(), 1L);
mockWriteChunkResponse(mockDn1Protocol);
- mockPutBlockResponse(mockDn1Protocol, testContainerId, 1L, data);
+ mockPutBlockResponse(mockDn1Protocol, CONTAINER_ID.get(), 1L, data);
OzoneBucket bucket = objectStore.getVolume(VOLUME_NAME)
.getBucket(BUCKET_NAME);
@@ -478,17 +478,17 @@ public class TestOmContainerLocationCache {
IOUtils.write(data, os);
}
- mockScmGetContainerPipeline(testContainerId, DN1);
+ mockScmGetContainerPipeline(CONTAINER_ID.get(), DN1);
OzoneKeyDetails key1 = bucket.getKey(keyName);
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
try (InputStream is = key1.getContent()) {
// simulate dn1 got errors, and the container's moved to dn2.
- mockGetBlock(mockDn1Protocol, testContainerId, 1L, data, null, null);
- mockReadChunk(mockDn1Protocol, testContainerId, 1L, null,
+ mockGetBlock(mockDn1Protocol, CONTAINER_ID.get(), 1L, data, null, null);
+ mockReadChunk(mockDn1Protocol, CONTAINER_ID.get(), 1L, null,
dnException, dnResponseCode);
assertThrows(expectedEx,
@@ -497,7 +497,7 @@ public class TestOmContainerLocationCache {
// verify SCM is called one more time to refresh.
verify(mockScmContainerClient, times(1))
- .getContainerWithPipelineBatch(newHashSet(testContainerId));
+ .getContainerWithPipelineBatch(newHashSet(CONTAINER_ID.get()));
}
private void mockPutBlockResponse(XceiverClientSpi mockDnProtocol,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 4ad0c804f1..83f0ff3144 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -23,7 +23,6 @@ import java.util.List;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -151,7 +150,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
* Test OmSnapshot bucket interface.
*/
@RunWith(Parameterized.class)
-@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public class TestOmSnapshot {
static {
diff --git a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
index 493d262390..400cb170cb 100644
--- a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
@@ -62,8 +62,8 @@
<Bug pattern="UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD" />
</Match>
<Match>
- <Class name="org.apache.hadoop.ozone.om.request.volume.TestOMVolumeCreateRequest"/>
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
+ <Class name="org.apache.hadoop.ozone.om.snapshot.TestSnapshotDiffManager" />
+ <Bug pattern="RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
</Match>
<Match>
<Class name="org.apache.hadoop.ozone.security.TestOzoneManagerBlockToken"/>
@@ -81,6 +81,10 @@
<Class name="org.apache.hadoop.ozone.security.TestOzoneTokenIdentifier"/>
<Bug pattern="UC_USELESS_OBJECT" />
</Match>
+ <Match>
+ <Class name="org.apache.hadoop.ozone.om.snapshot.TestFSODirectoryPathResolver"/>
+ <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
+ </Match>
<Match>
<Class name="org.apache.hadoop.ozone.om.upgrade.TestOMLayoutFeatureAspect"/>
<Bug pattern="BC_IMPOSSIBLE_CAST" />
diff --git a/hadoop-ozone/ozone-manager/pom.xml
b/hadoop-ozone/ozone-manager/pom.xml
index 4fbdb8aac1..2cd6e4e8be 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -106,11 +106,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-common</artifactId>
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
index 36fe2c1123..970cd8b95f 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java
@@ -16,7 +16,6 @@
*/
package org.apache.hadoop.ozone.om;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
@@ -26,8 +25,11 @@ import org.apache.hadoop.ozone.om.protocol.OMAdminProtocol;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
+import org.apache.ratis.util.MemoizedSupplier;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Supplier;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL;
import static org.apache.hadoop.ozone.om.OMConfigKeys
@@ -40,8 +42,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys
@InterfaceStability.Unstable
public final class OMPolicyProvider extends PolicyProvider {
- private static AtomicReference<OMPolicyProvider> atomicReference =
- new AtomicReference<>();
+ private static final Supplier<OMPolicyProvider> SUPPLIER =
+ MemoizedSupplier.valueOf(OMPolicyProvider::new);
private OMPolicyProvider() {
}
@@ -49,26 +51,22 @@ public final class OMPolicyProvider extends PolicyProvider {
@Private
@Unstable
public static OMPolicyProvider getInstance() {
- if (atomicReference.get() == null) {
- atomicReference.compareAndSet(null, new OMPolicyProvider());
- }
- return atomicReference.get();
+ return SUPPLIER.get();
}
- private static final Service[] OM_SERVICES =
- new Service[]{
+ private static final List<Service> OM_SERVICES =
+ Arrays.asList(
new Service(OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL,
OzoneManagerProtocol.class),
new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL,
OMInterServiceProtocol.class),
new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL,
OMAdminProtocol.class)
- };
+ );
- @SuppressFBWarnings("EI_EXPOSE_REP")
@Override
public Service[] getServices() {
- return OM_SERVICES;
+ return OM_SERVICES.toArray(new Service[0]);
}
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
index 4fdd9f4fa4..7560d453eb 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
@@ -17,13 +17,15 @@
*/
package org.apache.hadoop.ozone.om;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.ratis.util.MemoizedSupplier;
+
+import java.util.function.Supplier;
/**
* This class is for maintaining Snapshot Manager statistics.
@@ -37,23 +39,16 @@ public final class OmSnapshotMetrics implements OmMetadataReaderMetrics {
private OmSnapshotMetrics() {
}
- private static OmSnapshotMetrics instance;
-
- @SuppressFBWarnings("DC_DOUBLECHECK")
- public static OmSnapshotMetrics getInstance() {
- if (instance != null) {
- return instance;
- }
-
- synchronized (OmSnapshotMetrics.class) {
- if (instance == null) {
+ private static final Supplier<OmSnapshotMetrics> SUPPLIER =
+ MemoizedSupplier.valueOf(() -> {
MetricsSystem ms = DefaultMetricsSystem.instance();
- instance = ms.register(SOURCE_NAME,
+ return ms.register(SOURCE_NAME,
"Snapshot Manager Metrics",
new OmSnapshotMetrics());
- }
- }
- return instance;
+ });
+
+ public static OmSnapshotMetrics getInstance() {
+ return SUPPLIER.get();
}
private @Metric
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
index d37a37f20b..10c09c3f12 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.om.snapshot;
import com.google.common.collect.Sets;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -26,7 +25,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import java.io.IOException;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
@@ -36,7 +34,7 @@ import java.util.Queue;
import java.util.Set;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.OzoneConsts.ROOT_PATH;
/**
* Class to resolve absolute paths for FSO DirectoryInfo Objects.
@@ -70,7 +68,6 @@ public class FSODirectoryPathResolver implements ObjectPathResolver {
* false exception will be thrown.
* @return Map of Path corresponding to provided directory object IDs
*/
- @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
@Override
public Map<Long, Path> getAbsolutePathForObjectIDs(
Optional<Set<Long>> dirObjIds, boolean skipUnresolvedObjs)
@@ -83,7 +80,7 @@ public class FSODirectoryPathResolver implements
ObjectPathResolver {
Set<Long> objIds = Sets.newHashSet(dirObjIds.get());
Map<Long, Path> objectIdPathMap = new HashMap<>();
Queue<Pair<Long, Path>> objectIdPathVals = new LinkedList<>();
- Pair<Long, Path> root = Pair.of(bucketId, Paths.get(OZONE_URI_DELIMITER));
+ Pair<Long, Path> root = Pair.of(bucketId, ROOT_PATH);
objectIdPathVals.add(root);
addToPathMap(root, objIds, objectIdPathMap);
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index e0cba9575f..651ed06cbe 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.snapshot;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.io.file.PathUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.StringUtils;
@@ -1246,7 +1245,6 @@ public class SnapshotDiffManager implements AutoCloseable
{
}
}
- @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
private String resolveBucketRelativePath(boolean isFSOBucket,
final Optional<Map<Long, Path>> parentIdMap, byte[] keyVal,
boolean skipUnresolvedObjIds)
@@ -1268,7 +1266,7 @@ public class SnapshotDiffManager implements AutoCloseable
{
return parentIdMap.map(m -> m.get(parentId).resolve(splitKey[1]))
.get().toString().substring(1);
}
- return Paths.get(OzoneConsts.OZONE_URI_DELIMITER).resolve(key).toString()
+ return OzoneConsts.ROOT_PATH.resolve(key).toString()
.substring(1);
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java
index 56a706508b..7662e47d86 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.ozone.om;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock;
import org.apache.hadoop.ozone.om.multitenant.AuthorizerLockImpl;
import org.apache.ozone.test.GenericTestUtils;
@@ -43,7 +42,6 @@ public class TestAuthorizerLockImpl {
* Tests StampedLock behavior.
*/
@Test
- @SuppressFBWarnings("IMSE_DONT_CATCH_IMSE")
public void testStampedLockBehavior() throws InterruptedException {
final AuthorizerLock authorizerLock = new AuthorizerLockImpl();
@@ -55,20 +53,14 @@ public class TestAuthorizerLockImpl {
authorizerLock.unlockWrite(writeLockStamp);
// Case 1: An incorrect stamp won't be able to unlock, throws IMSE
- readLockStamp = authorizerLock.tryReadLock(100);
- try {
- authorizerLock.unlockRead(readLockStamp - 1L);
- Assertions.fail("Should have thrown IllegalMonitorStateException");
- } catch (IllegalMonitorStateException ignored) {
- }
- authorizerLock.unlockRead(readLockStamp);
- writeLockStamp = authorizerLock.tryWriteLock(100);
- try {
- authorizerLock.unlockWrite(writeLockStamp - 1L);
- Assertions.fail("Should have thrown IllegalMonitorStateException");
- } catch (IllegalMonitorStateException ignored) {
- }
- authorizerLock.unlockWrite(writeLockStamp);
+ long stamp2 = authorizerLock.tryReadLock(100);
+ Assertions.assertThrows(IllegalMonitorStateException.class,
+ () -> authorizerLock.unlockRead(stamp2 - 1));
+ authorizerLock.unlockRead(stamp2);
+ long stamp3 = authorizerLock.tryWriteLock(100);
+ Assertions.assertThrows(IllegalMonitorStateException.class,
+ () -> authorizerLock.unlockWrite(stamp3 - 1));
+ authorizerLock.unlockWrite(stamp3);
// Case 2: Read lock is reentrant; Write lock is exclusive
long readLockStamp1 = authorizerLock.tryReadLock(100);
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index ad25208ddd..462c2a3b88 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.ozone.om;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -273,7 +272,6 @@ public class TestOmSnapshotManager {
* follower/db.snapshots/checkpointState/snap2/s1.sst
*/
@Test
- @SuppressFBWarnings({"NP_NULL_ON_SOME_PATH"})
public void testHardLinkCreation() throws IOException {
// Map of links to files on the leader
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index bb5ae54c8d..1fb287f89e 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -166,6 +166,7 @@ public class TestOMVolumeCreateRequest extends
TestOMVolumeRequest {
new OMVolumeCreateRequest(originalRequest);
modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
+ omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
omClientResponse =
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2L,
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
index 22120409f9..7e662d90eb 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.snapshot;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -111,7 +110,6 @@ public class TestFSODirectoryPathResolver {
return dirInfos;
}
- @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
@Test
public void testGetAbsolutePathForValidObjectIDs() throws IOException {
Map<Integer, List<Integer>> dirMap = ImmutableMap.of(
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index cd67b89559..8d8921ddff 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -21,7 +21,6 @@ import com.google.common.cache.CacheLoader;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.StringUtils;
@@ -632,8 +631,6 @@ public class TestSnapshotDiffManager {
* In the case of reading tombstones old Snapshot Persistent map should have
* object Ids in the range 50-100 & should be empty otherwise
*/
- @SuppressFBWarnings({"DLS_DEAD_LOCAL_STORE",
- "RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT"})
@ParameterizedTest
@CsvSource({"false," + OmMetadataManagerImpl.DIRECTORY_TABLE,
"true," + OmMetadataManagerImpl.DIRECTORY_TABLE,
diff --git a/hadoop-ozone/ozonefs-common/pom.xml
b/hadoop-ozone/ozonefs-common/pom.xml
index de50a4f7cf..3cf0b8cea7 100644
--- a/hadoop-ozone/ozonefs-common/pom.xml
+++ b/hadoop-ozone/ozonefs-common/pom.xml
@@ -41,11 +41,6 @@
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-hadoop-dependency-client</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>ozone-client</artifactId>
diff --git
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index de8e0df6b5..dbe3b517e5 100644
---
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -1277,7 +1277,7 @@ public class BasicOzoneFileSystem extends FileSystem {
FileStatus fileStatus = constructFileStatus(fileStatusAdapter);
BlockLocation[] blockLocations = fileStatusAdapter.getBlockLocations();
- if (blockLocations == null || blockLocations.length == 0) {
+ if (blockLocations.length == 0) {
return fileStatus;
}
return new LocatedFileStatus(fileStatus, blockLocations);
diff --git
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 3ed415e931..8983cba904 100644
---
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -1491,7 +1491,7 @@ public class BasicRootedOzoneFileSystem extends
FileSystem {
FileStatus convertFileStatus(FileStatusAdapter fileStatusAdapter) {
FileStatus fileStatus = constructFileStatus(fileStatusAdapter);
BlockLocation[] blockLocations = fileStatusAdapter.getBlockLocations();
- if (blockLocations == null || blockLocations.length == 0) {
+ if (blockLocations.length == 0) {
return fileStatus;
}
return new LocatedFileStatus(fileStatus, blockLocations);
diff --git
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
index a9e482ca4f..f92f8d9570 100644
---
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
+++
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
/**
* Class to hold the internal information of a FileStatus.
@@ -46,7 +49,7 @@ public final class FileStatusAdapter {
private final String owner;
private final String group;
private final Path symlink;
- private final BlockLocation[] blockLocations;
+ private final List<BlockLocation> blockLocations;
private final boolean isEncrypted;
@@ -71,7 +74,7 @@ public final class FileStatusAdapter {
this.owner = owner;
this.group = group;
this.symlink = symlink;
- this.blockLocations = locations.clone();
+ this.blockLocations = new ArrayList<>(Arrays.asList(locations));
this.isEncrypted = isEncrypted;
this.isErasureCoded = isErasureCoded;
}
@@ -137,9 +140,8 @@ public final class FileStatusAdapter {
return isErasureCoded;
}
- @SuppressFBWarnings("EI_EXPOSE_REP")
public BlockLocation[] getBlockLocations() {
- return blockLocations;
+ return blockLocations.toArray(new BlockLocation[0]);
}
@Override
diff --git a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
index 5656496168..2fe78fbf2f 100644
--- a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml
@@ -47,6 +47,10 @@
<Class
name="org.apache.hadoop.ozone.recon.spi.impl.TestOzoneManagerServiceProviderImpl"/>
<Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
+ <Match>
+ <Class name="~org.apache.hadoop.ozone.recon.tasks.TestNSSummaryTask.*"/>
+ <Bug pattern="SIC_INNER_SHOULD_BE_STATIC" /> <!-- Only non-static inner
classes can be @Nested -->
+ </Match>
<Match>
<Class name="org.apache.hadoop.ozone.recon.TestReconUtils"/>
<Bug pattern="OBL_UNSATISFIED_OBLIGATION_EXCEPTION_EDGE" />
diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml
index 31c520d9d5..933ac426e1 100644
--- a/hadoop-ozone/recon/pom.xml
+++ b/hadoop-ozone/recon/pom.xml
@@ -380,11 +380,6 @@
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.javassist</groupId>
<artifactId>javassist</artifactId>
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
index df7939c5b3..76684844c7 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
@@ -20,23 +20,25 @@ package org.apache.hadoop.ozone.recon.scm;
import static
org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Supplier;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
import org.apache.hadoop.ozone.protocol.ReconDatanodeProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
+import org.apache.ratis.util.MemoizedSupplier;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* {@link PolicyProvider} for Recon protocols.
*/
public final class ReconPolicyProvider extends PolicyProvider {
- private static AtomicReference<ReconPolicyProvider> atomicReference =
- new AtomicReference<>();
+ private static final Supplier<ReconPolicyProvider> SUPPLIER =
+ MemoizedSupplier.valueOf(ReconPolicyProvider::new);
private ReconPolicyProvider() {
}
@@ -44,23 +46,19 @@ public final class ReconPolicyProvider extends
PolicyProvider {
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static ReconPolicyProvider getInstance() {
- if (atomicReference.get() == null) {
- atomicReference.compareAndSet(null, new ReconPolicyProvider());
- }
- return atomicReference.get();
+ return SUPPLIER.get();
}
- private static final Service[] RECON_SERVICES =
- new Service[]{
+ private static final List<Service> RECON_SERVICES =
+ Collections.singletonList(
new Service(
OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
ReconDatanodeProtocol.class)
- };
+ );
- @SuppressFBWarnings("EI_EXPOSE_REP")
@Override
public Service[] getServices() {
- return RECON_SERVICES;
+ return RECON_SERVICES.toArray(new Service[0]);
}
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
index 4f1761ff51..96f58fdc41 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.recon.tasks;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -45,7 +44,7 @@ import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Set;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
@@ -60,7 +59,6 @@ import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestRe
* support for OBS buckets. Check that the NSSummary
* for the OBS bucket is null.
*/
-@SuppressFBWarnings
public final class TestNSSummaryTask {
private static ReconNamespaceSummaryManager reconNamespaceSummaryManager;
@@ -267,13 +265,7 @@ public final class TestNSSummaryTask {
.setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
.build();
- OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(
- new ArrayList<OMDBUpdateEvent>() {{
- add(keyEvent1);
- add(keyEvent2);
- }});
-
- return omUpdateEventBatch;
+ return new OMUpdateEventBatch(Arrays.asList(keyEvent1, keyEvent2));
}
@Test
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index 72bfdd81e7..771d22a121 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.recon.tasks;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -42,7 +41,7 @@ import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
@@ -57,7 +56,6 @@ import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NS
/**
* Test for NSSummaryTaskWithFSO.
*/
-@SuppressFBWarnings
public final class TestNSSummaryTaskWithFSO {
private static ReconNamespaceSummaryManager reconNamespaceSummaryManager;
@@ -390,18 +388,10 @@ public final class TestNSSummaryTaskWithFSO {
.setTable(omMetadataManager.getDirectoryTable().getName())
.build();
- OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(
- new ArrayList<OMDBUpdateEvent>() {{
- add(keyEvent1);
- add(keyEvent2);
- add(keyEvent3);
- add(keyEvent4);
- add(keyEvent5);
- add(keyEvent6);
- add(keyEvent7);
- }});
-
- return omUpdateEventBatch;
+ return new OMUpdateEventBatch(Arrays.asList(
+ keyEvent1, keyEvent2, keyEvent3, keyEvent4, keyEvent5,
+ keyEvent6, keyEvent7
+ ));
}
@Test
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
index 1e32db78da..ca7a9d86b1 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.recon.tasks;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -45,7 +44,7 @@ import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
@@ -59,7 +58,6 @@ import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestRe
/**
* Test for NSSummaryTaskWithLegacy.
*/
-@SuppressFBWarnings
public final class TestNSSummaryTaskWithLegacy {
private static ReconNamespaceSummaryManager reconNamespaceSummaryManager;
@@ -429,18 +427,10 @@ public final class TestNSSummaryTaskWithLegacy {
.setTable(omMetadataManager.getKeyTable(getBucketLayout()).getName())
.build();
- OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(
- new ArrayList<OMDBUpdateEvent>() {{
- add(keyEvent1);
- add(keyEvent2);
- add(keyEvent3);
- add(keyEvent4);
- add(keyEvent5);
- add(keyEvent6);
- add(keyEvent7);
- }});
-
- return omUpdateEventBatch;
+ return new OMUpdateEventBatch(Arrays.asList(
+ keyEvent1, keyEvent2, keyEvent3, keyEvent4, keyEvent5,
+ keyEvent6, keyEvent7
+ ));
}
@Test
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index cfa534b1b4..d7e619ee35 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -173,11 +173,6 @@
<artifactId>mockito-junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 8e96973f82..6ab3a4ba7f 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.ozone.s3.endpoint;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -99,7 +98,6 @@ public class BucketEndpoint extends EndpointBase {
* for more details.
*/
@GET
- @SuppressFBWarnings
@SuppressWarnings({"parameternumber", "methodlength"})
public Response get(
@PathParam("bucket") String bucketName,
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 118474827f..93282683cf 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -101,11 +101,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.kohsuke.metainf-services</groupId>
<artifactId>metainf-services</artifactId>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index 725b2b89fb..32f9c045aa 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -34,7 +34,6 @@ import java.util.ArrayList;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.audit.parser.model.AuditEntry;
@@ -99,7 +98,6 @@ public final class DatabaseHelper {
return true;
}
- @SuppressFBWarnings("REC_CATCH_EXCEPTION")
private static boolean insertAudits(String dbName, String logs)
throws Exception {
try (Connection connection = getConnection(dbName);
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
index 4ad01f127f..022eacde4a 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
@@ -18,19 +18,19 @@ package org.apache.hadoop.ozone.freon;
import com.codahale.metrics.Timer;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;
import java.util.HashMap;
@@ -141,7 +141,7 @@ public class OzoneClientKeyReadWriteListOps extends
BaseFreonGenerator
private static final Logger LOG =
LoggerFactory.getLogger(OzoneClientKeyReadWriteListOps.class);
- private static AtomicLong nextNumber = new AtomicLong();
+ private static final AtomicLong NEXT_NUMBER = new AtomicLong();
/**
* Task type of read task, or write task.
*/
@@ -159,31 +159,32 @@ public class OzoneClientKeyReadWriteListOps extends
BaseFreonGenerator
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
clientCount = getThreadNo();
ozoneClients = new OzoneClient[clientCount];
- for (int i = 0; i < clientCount; i++) {
- ozoneClients[i] = createOzoneClient(omServiceID, ozoneConfiguration);
- }
+ try {
+ for (int i = 0; i < clientCount; i++) {
+ ozoneClients[i] = createOzoneClient(omServiceID, ozoneConfiguration);
+ }
- ensureVolumeAndBucketExist(ozoneClients[0], volumeName, bucketName);
+ ensureVolumeAndBucketExist(ozoneClients[0], volumeName, bucketName);
- timer = getMetrics().timer("key-read-write-list");
- if (objectSizeInBytes >= 0) {
- keyContent = RandomUtils.nextBytes(objectSizeInBytes);
- }
- if (kg == null) {
- kg = new KeyGeneratorUtil();
- }
- runTests(this::readWriteListKeys);
-
- for (int i = 0; i < clientCount; i++) {
- if (ozoneClients[i] != null) {
- ozoneClients[i].close();
+ timer = getMetrics().timer("key-read-write-list");
+ if (objectSizeInBytes >= 0) {
+ keyContent = RandomUtils.nextBytes(objectSizeInBytes);
+ }
+ if (kg == null) {
+ kg = new KeyGeneratorUtil();
+ }
+ runTests(this::readWriteListKeys);
+ } finally {
+ for (int i = 0; i < clientCount; i++) {
+ if (ozoneClients[i] != null) {
+ ozoneClients[i].close();
+ }
}
}
return null;
}
- public void readWriteListKeys(long counter) throws RuntimeException,
- IOException {
+ private void readWriteListKeys(long counter) throws RuntimeException {
int clientIndex = (int)((counter) % clientCount);
TaskType taskType = decideReadWriteOrListTask();
String keyName = getKeyName();
@@ -213,47 +214,38 @@ public class OzoneClientKeyReadWriteListOps extends
BaseFreonGenerator
});
}
- @SuppressFBWarnings
+
public void processReadTasks(String keyName, OzoneClient client)
throws RuntimeException, IOException {
OzoneKeyDetails keyDetails = client.getProxy().
getKeyDetails(volumeName, bucketName, keyName);
if (!readMetadataOnly) {
- byte[] data = new byte[objectSizeInBytes];
- try (OzoneInputStream introStream = keyDetails.getContent()) {
- introStream.read(data);
- } catch (Exception ex) {
- throw ex;
+ try (InputStream input = keyDetails.getContent()) {
+ byte[] ignored = IOUtils.readFully(input, objectSizeInBytes);
}
}
}
public void processWriteTasks(String keyName, OzoneClient ozoneClient)
throws RuntimeException, IOException {
- try (OzoneOutputStream out =
- ozoneClient.getProxy().createKey(volumeName, bucketName,
- keyName, objectSizeInBytes, null, new HashMap())) {
+ try (OutputStream out = ozoneClient.getProxy().createKey(
+ volumeName, bucketName, keyName, objectSizeInBytes, null,
+ new HashMap<>())) {
out.write(keyContent);
- } catch (Exception ex) {
- throw ex;
}
}
public void processListTasks(OzoneClient ozoneClient)
throws RuntimeException, IOException {
- try {
- ozoneClient.getProxy()
- .listKeys(volumeName, bucketName, getPrefix(), null, maxListResult);
- } catch (Exception ex) {
- throw ex;
- }
+ ozoneClient.getProxy()
+ .listKeys(volumeName, bucketName, getPrefix(), null, maxListResult);
}
public TaskType decideReadWriteOrListTask() {
int tmp = ThreadLocalRandom.current().nextInt(1, 101);
if (tmp <= percentageRead) {
return TaskType.READ_TASK;
- } else if (tmp > percentageRead && tmp <= percentageRead + percentageList)
{
+ } else if (tmp <= percentageRead + percentageList) {
return TaskType.LIST_TASK;
} else {
return TaskType.WRITE_TASK;
@@ -264,7 +256,7 @@ public class OzoneClientKeyReadWriteListOps extends
BaseFreonGenerator
StringBuilder keyNameSb = new StringBuilder();
long next;
if (linear) {
- next = startIndex + nextNumber.getAndUpdate(x -> (x + 1) % range);
+ next = startIndex + NEXT_NUMBER.getAndUpdate(x -> (x + 1) % range);
} else {
next = ThreadLocalRandom.current().
nextLong(startIndex, startIndex + range);
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index ee5ac6140b..87608250f9 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -64,7 +64,6 @@ import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.time.DurationFormatUtils;
@@ -788,7 +787,6 @@ public final class RandomKeyGenerator implements
Callable<Void> {
}
}
- @SuppressFBWarnings("REC_CATCH_EXCEPTION")
private boolean createKey(long globalKeyNumber) {
int globalBucketNumber = (int) (globalKeyNumber % totalBucketCount);
long keyNumber = globalKeyNumber / totalBucketCount;
diff --git a/pom.xml b/pom.xml
index 6b96b85b40..46d4187c24 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1415,12 +1415,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xs
<artifactId>hadoop-cloud-storage</artifactId>
<version>${hadoop.version}</version>
</dependency>
- <dependency>
- <groupId>com.github.spotbugs</groupId>
- <artifactId>spotbugs-annotations</artifactId>
- <version>${spotbugs.version}</version>
- <scope>provided</scope>
- </dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]