This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new ba44e12da3 HDDS-12382. Fix other spotbugs warnings (#7969)
ba44e12da3 is described below
commit ba44e12da3c0417e29c6ff69aef5ff982e50ccab
Author: Peter Lee <[email protected]>
AuthorDate: Wed Feb 26 02:02:48 2025 +0800
HDDS-12382. Fix other spotbugs warnings (#7969)
---
.../client/dev-support/findbugsExcludeFile.xml | 13 --------
.../hdds/scm/storage/TestBlockInputStream.java | 19 ++++++++---
.../hdds/scm/storage/TestChunkInputStream.java | 24 +++++++++-----
.../framework/dev-support/findbugsExcludeFile.xml | 10 ------
.../server/http/TestRatisDropwizardExports.java | 2 +-
.../server-scm/dev-support/findbugsExcludeFile.xml | 38 ----------------------
.../scm/TestStorageContainerManagerHttpServer.java | 2 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 4 +--
.../dev-support/findbugsExcludeFile.xml | 3 --
.../fs/http/server/metrics/TestHttpFSMetrics.java | 22 ++++++++-----
.../TestGenerateOzoneRequiredConfigurations.java | 3 +-
11 files changed, 50 insertions(+), 90 deletions(-)
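
Most of the hunks below apply the same two patterns to clear the spotbugs warnings instead of excluding them: capture and assert the value returned by InputStream.read() (RR_NOT_CHECKED), and assert the boolean returned by File.mkdirs() or File.setReadOnly() (RV_RETURN_VALUE_IGNORED_BAD_PRACTICE). The following is a minimal, self-contained sketch of those patterns; the class and test names are illustrative only and are not part of the patch.

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.ByteArrayInputStream;
    import java.io.File;
    import java.io.IOException;
    import java.io.InputStream;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class ReturnValueCheckExample {
      @Test
      void readAndMkdirsReturnValuesAreChecked(@TempDir File dir) throws IOException {
        // RR_NOT_CHECKED: read(byte[], int, int) may read fewer bytes than
        // requested, so the returned count is captured and verified.
        byte[] buffer = new byte[4];
        try (InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3, 4})) {
          int bytesRead = in.read(buffer, 0, buffer.length);
          assertEquals(buffer.length, bytesRead, "Expected to read the full buffer");
        }

        // RV_RETURN_VALUE_IGNORED_BAD_PRACTICE: mkdirs() reports failure via
        // its boolean return value rather than an exception.
        File metadataDir = new File(dir, "metadata");
        assertTrue(metadataDir.mkdirs());
      }
    }
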
diff --git a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
index 7b874905bf..2951138b2f 100644
--- a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml
@@ -15,21 +15,8 @@
limitations under the License.
-->
<FindBugsFilter>
- <!-- Test -->
<Match>
<Class name="org.apache.hadoop.hdds.scm.storage.ByteArrayReader"></Class>
<Bug pattern="EI_EXPOSE_REP2" /> <!-- "Deep copy byte[] has bad impact on performance" -->
</Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.storage.TestBufferPool"></Class>
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.storage.TestChunkInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.storage.TestBlockInputStream"></Class>
- <Bug pattern="RR_NOT_CHECKED" />
- </Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
index c86b3e077c..423eedae49 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
@@ -204,7 +204,8 @@ public void testRead() throws Exception {
// the read should result in 3 ChunkInputStream reads
seekAndVerify(50);
byte[] b = new byte[200];
- blockStream.read(b, 0, 200);
+ int bytesRead = blockStream.read(b, 0, 200);
+ assertEquals(200, bytesRead, "Expected to read 200 bytes");
matchWithInputData(b, 50, 200);
// The new position of the blockInputStream should be the last index read
@@ -252,12 +253,14 @@ public void testSeekAndRead() throws Exception {
// Seek to a position and read data
seekAndVerify(50);
byte[] b1 = new byte[100];
- blockStream.read(b1, 0, 100);
+ int bytesRead1 = blockStream.read(b1, 0, 100);
+ assertEquals(100, bytesRead1, "Expected to read 100 bytes");
matchWithInputData(b1, 50, 100);
// Next read should start from the position of the last read + 1 i.e. 100
byte[] b2 = new byte[100];
- blockStream.read(b2, 0, 100);
+ int bytesRead2 = blockStream.read(b2, 0, 100);
+ assertEquals(100, bytesRead2, "Expected to read 100 bytes");
matchWithInputData(b2, 150, 100);
}
@@ -280,7 +283,8 @@ public void testRefreshPipelineFunction() throws Exception {
assertFalse(isRefreshed.get());
seekAndVerify(50);
byte[] b = new byte[200];
- blockInputStreamWithRetry.read(b, 0, 200);
+ int bytesRead = blockInputStreamWithRetry.read(b, 0, 200);
+ assertEquals(200, bytesRead, "Expected to read 200 bytes");
assertThat(logCapturer.getOutput()).contains("Retry read after");
assertTrue(isRefreshed.get());
}
@@ -388,7 +392,12 @@ public void testReadNotRetriedOnOtherException(IOException ex)
// WHEN
assertThrows(ex.getClass(),
- () -> subject.read(new byte[len], 0, len));
+ () -> {
+ byte[] buffer = new byte[len];
+ int bytesRead = subject.read(buffer, 0, len);
+ // This line should never be reached due to the exception
+ assertEquals(len, bytesRead);
+ });
// THEN
verify(refreshFunction, never()).apply(blockID);
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
index 823d718b99..248ea86552 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
@@ -133,7 +133,8 @@ private void seekAndVerify(int pos) throws Exception {
@Test
public void testFullChunkRead() throws Exception {
byte[] b = new byte[CHUNK_SIZE];
- chunkStream.read(b, 0, CHUNK_SIZE);
+ int bytesRead = chunkStream.read(b, 0, CHUNK_SIZE);
+ assertEquals(CHUNK_SIZE, bytesRead, "Expected to read full chunk size");
matchWithInputData(b, 0, CHUNK_SIZE);
}
@@ -142,7 +143,8 @@ public void testPartialChunkRead() throws Exception {
int len = CHUNK_SIZE / 2;
byte[] b = new byte[len];
- chunkStream.read(b, 0, len);
+ int bytesRead = chunkStream.read(b, 0, len);
+ assertEquals(len, bytesRead, "Expected to read half chunk size");
matchWithInputData(b, 0, len);
@@ -169,7 +171,8 @@ public void testSeek() throws Exception {
// copying chunk data from index 20 to 59 into the buffers (checksum
// boundaries).
byte[] b = new byte[30];
- chunkStream.read(b, 0, 30);
+ int bytesRead = chunkStream.read(b, 0, 30);
+ assertEquals(30, bytesRead, "Expected to read 30 bytes");
matchWithInputData(b, 25, 30);
matchWithInputData(chunkStream.getReadByteBuffers(), 20, 40);
@@ -194,7 +197,8 @@ public void testSeek() throws Exception {
// released and hence chunkPosition updated with current position of chunk.
seekAndVerify(25);
b = new byte[15];
- chunkStream.read(b, 0, 15);
+ int bytesRead2 = chunkStream.read(b, 0, 15);
+ assertEquals(15, bytesRead2, "Expected to read 15 bytes");
matchWithInputData(b, 25, 15);
assertEquals(40, chunkStream.getChunkPosition());
}
@@ -204,19 +208,22 @@ public void testSeekAndRead() throws Exception {
// Seek to a position and read data
seekAndVerify(50);
byte[] b1 = new byte[20];
- chunkStream.read(b1, 0, 20);
+ int bytesRead1 = chunkStream.read(b1, 0, 20);
+ assertEquals(20, bytesRead1, "Expected to read 20 bytes");
matchWithInputData(b1, 50, 20);
// Next read should start from the position of the last read + 1 i.e. 70
byte[] b2 = new byte[20];
- chunkStream.read(b2, 0, 20);
+ int bytesRead2 = chunkStream.read(b2, 0, 20);
+ assertEquals(20, bytesRead2, "Expected to read 20 bytes");
matchWithInputData(b2, 70, 20);
}
@Test
public void testUnbuffered() throws Exception {
byte[] b1 = new byte[20];
- chunkStream.read(b1, 0, 20);
+ int bytesRead = chunkStream.read(b1, 0, 20);
+ assertEquals(20, bytesRead, "Expected to read 20 bytes");
matchWithInputData(b1, 0, 20);
chunkStream.unbuffer();
@@ -225,7 +232,8 @@ public void testUnbuffered() throws Exception {
// Next read should start from the position of the last read + 1 i.e. 20
byte[] b2 = new byte[20];
- chunkStream.read(b2, 0, 20);
+ int bytesRead2 = chunkStream.read(b2, 0, 20);
+ assertEquals(20, bytesRead2, "Expected to read 20 bytes");
matchWithInputData(b2, 20, 20);
}
diff --git a/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml
index 2f6d6d30ef..ad6c2f5f99 100644
--- a/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml
@@ -19,14 +19,4 @@
<Class name="org.apache.hadoop.hdds.utils.ProtocolMessageMetrics"></Class>
<Bug pattern="RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
</Match>
-
- <!-- Test -->
- <Match>
- <Class name="org.apache.hadoop.hdds.server.http.TestRatisDropwizardExports"></Class>
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.utils.BufferedMetricsCollector$BufferedMetricsRecordBuilderImpl"></Class>
- <Bug pattern="URF_UNREAD_FIELD" />
- </Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
index f2fee2c497..82023f696d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
@@ -64,7 +64,7 @@ public void export() throws IOException {
new RatisDropwizardExports(dropWizardMetricRegistry);
CollectorRegistry collector = new CollectorRegistry();
- collector.register(new RatisDropwizardExports(dropWizardMetricRegistry));
+ collector.register(exports);
//export metrics to the string
StringWriter writer = new StringWriter();
diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
index dc08720c96..3571a8929e 100644
--- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
@@ -18,42 +18,4 @@
<Match>
<Package name="org.apache.hadoop.hdds.protocol.proto"/>
</Match>
- <!-- Test -->
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.TestHddsServerUtil" />
- <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.TestStorageContainerManagerHttpServer" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.block.TestBlockManager" />
- <Bug pattern="REC_CATCH_EXCEPTION" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.container.MockNodeManager" />
- <Bug pattern="URF_UNREAD_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.node.TestDatanodeAdminMonitor" />
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.node.TestSCMNodeManager" />
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.server.TestSCMSecurityProtocolServer" />
- <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.metadata.TestSCMTransactionInfoCodec"/>
- <Bug pattern="NP_NULL_PARAM_DEREF_ALL_TARGETS_DANGEROUS" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.hdds.scm.container.replication.ReplicationManager"/>
- <Field name="metrics" />
- <Bug pattern="IS2_INCONSISTENT_SYNC" />
- </Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 13229c213b..b6e475f0b2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -52,7 +52,7 @@ public class TestStorageContainerManagerHttpServer {
@BeforeAll
public static void setUp() throws Exception {
File ozoneMetadataDirectory = new File(baseDir, "metadata");
- ozoneMetadataDirectory.mkdirs();
+ assertTrue(ozoneMetadataDirectory.mkdirs());
conf = new OzoneConfiguration();
keystoresDir = baseDir.getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 0212999689..318e68983c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -1643,8 +1643,8 @@ public void testScmNodeReportUpdate()
Thread.sleep(100);
}
- final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1);
- final long expectedRemaining = capacity - expectedScmUsed;
+ long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1);
+ long expectedRemaining = capacity - expectedScmUsed;
GenericTestUtils.waitFor(
() -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed,
diff --git a/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml
index c55b1d94e1..ea725446e4 100644
--- a/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/httpfsgateway/dev-support/findbugsExcludeFile.xml
@@ -35,7 +35,4 @@
<Method name="closeFileSystem" />
<Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
</Match>
- <Match>
- <Source name="~.*Test.*\.java" />
- </Match>
</FindBugsFilter>
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
index bfaf477ed3..c34df89505 100644
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
+++ b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
@@ -19,6 +19,7 @@
import static org.apache.ozone.lib.service.hadoop.FileSystemAccessService.FILE_SYSTEM_SERVICE_CREATED;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.mock;
@@ -27,6 +28,7 @@
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -47,8 +49,12 @@
* Test class for HttpFSServerMetrics.
*/
public class TestHttpFSMetrics {
- private static final FileSystem mockFs = mock(FileSystem.class);
- private static final FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
+ private static FileSystem mockFs = mock(FileSystem.class);
+ private static FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
+
+ /**
+ * Mock FileSystemAccessService.
+ */
public static class MockFileSystemAccessService extends FileSystemAccessService {
@Override
protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
@@ -70,11 +76,11 @@ protected void closeFileSystem(FileSystem fs) throws IOException {
@BeforeAll
static void init(@TempDir File dir) throws Exception {
File tempDir = new File(dir, "temp");
- tempDir.mkdirs();
+ assertTrue(tempDir.mkdirs());
File logDir = new File(dir, "log");
- logDir.mkdirs();
+ assertTrue(logDir.mkdirs());
File confDir = new File(dir, "conf");
- confDir.mkdirs();
+ assertTrue(confDir.mkdirs());
System.setProperty("httpfs.home.dir", dir.getAbsolutePath());
}
@@ -89,7 +95,7 @@ public void setUp() throws Exception {
fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
metrics = HttpFSServerWebApp.getMetrics();
- ugi = UserGroupInformation.createUserForTesting("testuser", new String[] { "testgroup" });
+ ugi = UserGroupInformation.createUserForTesting("testuser", new String[] {"testgroup"});
}
@AfterEach
@@ -106,7 +112,7 @@ public void testFsCreate() throws Exception {
long initialBytesWritten = metrics.getBytesWritten();
FSOperations.FSCreate createOp = new FSOperations.FSCreate(
- new ByteArrayInputStream("test".getBytes()),
+ new ByteArrayInputStream("test".getBytes(StandardCharsets.UTF_8)),
"/test.txt",
(short) 0644,
true,
@@ -128,7 +134,7 @@ public void testFsAppend() throws Exception {
long initialBytesWritten = metrics.getBytesWritten();
FSOperations.FSAppend appendOp = new FSOperations.FSAppend(
- new ByteArrayInputStream("test".getBytes()),
+ new ByteArrayInputStream("test".getBytes(StandardCharsets.UTF_8)),
"/test.txt");
when(mockFs.append(isA(Path.class),
isA(Integer.class))).thenReturn(fsDataOutputStream);
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
index 56cecbaae6..b21b0caaf3 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -20,6 +20,7 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
@@ -233,7 +234,7 @@ public void testDoesNotOverwrite(@TempDir File tempPath) throws Exception {
*/
@Test
public void genconfFailureByInsufficientPermissions(@TempDir File tempPath) throws Exception {
- tempPath.setReadOnly();
+ assertTrue(tempPath.setReadOnly());
String[] args = new String[]{tempPath.getAbsolutePath()};
executeWithException(args, "Insufficient permission.");
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]