This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new cd308eaa85 HDDS-12904. Move field declarations to start of class in other hdds modules (#8336)
cd308eaa85 is described below
commit cd308eaa85b0a75f2e263c313da320f33fd227ff
Author: Ivan Zlenko <[email protected]>
AuthorDate: Sat Apr 26 23:08:32 2025 +0500
HDDS-12904. Move field declarations to start of class in other hdds modules (#8336)
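
The convention applied across these modules: field declarations (static first, then instance) sit at the top of the class, ahead of constructors and methods. A minimal hypothetical sketch of the target layout (the Example class and its members below are illustrative only, not code from this patch; the exact placement of static factory methods varies between the touched files):

    public final class Example {
      // Static fields are declared first.
      private static final int DEFAULT_CAPACITY = 16;

      // Instance fields follow the static fields.
      private final int capacity;

      // Constructors come after all field declarations.
      private Example(int capacity) {
        this.capacity = capacity;
      }

      // Methods, including static factories, come after the fields.
      public static Example withDefaultCapacity() {
        return new Example(DEFAULT_CAPACITY);
      }
    }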
---
.../ozone/container/common/impl/ContainerSet.java | 18 +++++-----
.../ozone/erasurecode/rawcoder/CoderUtil.java | 6 ++--
.../ozone/erasurecode/rawcoder/util/GF256.java | 5 +--
.../ozone/erasurecode/rawcoder/util/RSUtil.java | 7 ++--
.../db/managed/ManagedRocksObjectMetrics.java | 12 +++----
.../utils/db/managed/ManagedRocksObjectUtils.java | 5 +--
.../utils/db/managed/ManagedRawSSTFileReader.java | 11 +++---
.../cli/datanode/DecommissionStatusSubCommand.java | 4 +--
.../hdds/scm/cli/datanode/UsageInfoSubcommand.java | 42 +++++++++++-----------
9 files changed, 56 insertions(+), 54 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 68d73f3c41..9b5c89e1f7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -58,15 +58,6 @@ public class ContainerSet implements Iterable<Container<?>> {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerSet.class);
- public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
- return new ContainerSet(null, recoveringTimeout);
- }
-
- public static ContainerSet newRwContainerSet(Table<ContainerID, String> containerIdsTable, long recoveringTimeout) {
- Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
- return new ContainerSet(containerIdsTable, recoveringTimeout);
- }
-
private final ConcurrentSkipListMap<Long, Container<?>> containerMap = new ConcurrentSkipListMap<>();
private final ConcurrentSkipListSet<Long> missingContainerSet =
@@ -77,6 +68,15 @@ public static ContainerSet newRwContainerSet(Table<ContainerID, String> containe
private long recoveringTimeout;
private final Table<ContainerID, String> containerIdsTable;
+ public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
+ return new ContainerSet(null, recoveringTimeout);
+ }
+
+ public static ContainerSet newRwContainerSet(Table<ContainerID, String> containerIdsTable, long recoveringTimeout) {
+ Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
+ return new ContainerSet(containerIdsTable, recoveringTimeout);
+ }
+
private ContainerSet(Table<ContainerID, String> continerIdsTable, long recoveringTimeout) {
this(continerIdsTable, recoveringTimeout, null);
}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
index 9273737763..ebf45e88dd 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
@@ -26,12 +26,12 @@
*/
public final class CoderUtil {
+ private static byte[] emptyChunk = new byte[4096];
+
private CoderUtil() {
- // No called
+ // Not called
}
- private static byte[] emptyChunk = new byte[4096];
-
/**
* Make sure to return an empty chunk buffer for the desired length.
* @param leastLength
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
index 65a8557765..0524cea092 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
@@ -26,8 +26,6 @@
@InterfaceAudience.Private
public final class GF256 {
- private GF256() { }
-
private static final byte[] GF_BASE = new byte[]{
(byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, (byte) 0x10,
(byte) 0x20, (byte) 0x40, (byte) 0x80, (byte) 0x1d, (byte) 0x3a,
@@ -153,6 +151,9 @@ private GF256() { }
}
}
+ private GF256() {
+ }
+
/**
* Get the big GF multiply table so utilize it efficiently.
* @return the big GF multiply table
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
index 86911cb81c..ce5531fe0d 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
@@ -26,15 +26,14 @@
*/
@InterfaceAudience.Private
public final class RSUtil {
-
- private RSUtil() {
- }
-
// We always use the byte system (with symbol size 8, field size 256,
// primitive polynomial 285, and primitive root 2).
public static final GaloisField GF = GaloisField.getInstance();
public static final int PRIMITIVE_ROOT = 2;
+ private RSUtil() {
+ }
+
public static int[] getPrimitivePower(int numDataUnits, int numParityUnits) {
int[] primitivePower = new int[numDataUnits + numParityUnits];
// compute powers of the primitive root
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
index 0947dce348..ee8de31f76 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
@@ -37,12 +37,6 @@ public class ManagedRocksObjectMetrics {
private static final String SOURCE_NAME =
ManagedRocksObjectMetrics.class.getSimpleName();
- private static ManagedRocksObjectMetrics create() {
- return DefaultMetricsSystem.instance().register(SOURCE_NAME,
- "OzoneManager DoubleBuffer Metrics",
- new ManagedRocksObjectMetrics());
- }
-
@Metric(about = "Total number of managed RocksObjects that are not " +
"closed before being GCed.")
private MutableCounterLong totalLeakObjects;
@@ -74,4 +68,10 @@ long totalLeakObjects() {
long totalManagedObjects() {
return totalManagedObjects.value();
}
+
+ private static ManagedRocksObjectMetrics create() {
+ return DefaultMetricsSystem.instance().register(SOURCE_NAME,
+ "OzoneManager DoubleBuffer Metrics",
+ new ManagedRocksObjectMetrics());
+ }
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index eef1f286d6..e60508033e 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -34,8 +34,6 @@
* Utilities to help assert RocksObject closures.
*/
public final class ManagedRocksObjectUtils {
- private ManagedRocksObjectUtils() {
- }
static final Logger LOG =
LoggerFactory.getLogger(ManagedRocksObjectUtils.class);
@@ -44,6 +42,9 @@ private ManagedRocksObjectUtils() {
private static final LeakDetector LEAK_DETECTOR = new LeakDetector("ManagedRocksObject");
+ private ManagedRocksObjectUtils() {
+ }
+
static UncheckedAutoCloseable track(AutoCloseable object) {
ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
final Class<?> clazz = object.getClass();
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
index 97f7a7a49e..461d9ddc8a 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
@@ -32,6 +32,12 @@
*/
public class ManagedRawSSTFileReader<T> implements Closeable {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
+
+ private final String fileName;
+ // Native address of pointer to the object.
+ private final long nativeHandle;
+
public static boolean tryLoadLibrary() {
try {
loadLibrary();
@@ -50,11 +56,6 @@ public static boolean loadLibrary() throws NativeLibraryNotLoadedException {
return true;
}
- private final String fileName;
- // Native address of pointer to the object.
- private final long nativeHandle;
- private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
-
public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) {
this.fileName = fileName;
this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index 2243d0f0d2..75a58ceb4b 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -49,6 +49,8 @@
public class DecommissionStatusSubCommand extends ScmSubcommand {
+ private String errorMessage = "Error getting pipeline and container metrics for ";
+
@CommandLine.Option(names = { "--id" },
description = "Show info by datanode UUID",
defaultValue = "")
@@ -122,8 +124,6 @@ public void execute(ScmClient scmClient) throws IOException {
}
}
- private String errorMessage = "Error getting pipeline and container metrics for ";
-
public String getErrorMessage() {
return errorMessage;
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index 514be5a620..2d486dab49 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -60,27 +60,6 @@ public class UsageInfoSubcommand extends ScmSubcommand {
@CommandLine.ArgGroup(multiplicity = "1")
private ExclusiveArguments exclusiveArguments;
- private static class ExclusiveArguments {
- @CommandLine.Option(names = {"--address"}, paramLabel = "ADDRESS",
- description = "Show info by datanode ip or hostname address.",
- defaultValue = "")
- private String address;
-
- @CommandLine.Option(names = {"--uuid"}, paramLabel = "UUID", description =
- "Show info by datanode UUID.", defaultValue = "")
- private String uuid;
-
- @CommandLine.Option(names = {"-m", "--most-used"},
- description = "Show the most used datanodes.",
- defaultValue = "false")
- private boolean mostUsed;
-
- @CommandLine.Option(names = {"-l", "--least-used"},
- description = "Show the least used datanodes.",
- defaultValue = "false")
- private boolean leastUsed;
- }
-
@CommandLine.Option(names = {"-c", "--count"}, description = "Number of " +
"datanodes to display (Default: ${DEFAULT-VALUE}).",
paramLabel = "NUMBER OF NODES", defaultValue = "3")
@@ -288,4 +267,25 @@ public long getPipelineCount() {
return pipelineCount;
}
}
+
+ private static class ExclusiveArguments {
+ @CommandLine.Option(names = {"--address"}, paramLabel = "ADDRESS",
+ description = "Show info by datanode ip or hostname address.",
+ defaultValue = "")
+ private String address;
+
+ @CommandLine.Option(names = {"--uuid"}, paramLabel = "UUID", description =
+ "Show info by datanode UUID.", defaultValue = "")
+ private String uuid;
+
+ @CommandLine.Option(names = {"-m", "--most-used"},
+ description = "Show the most used datanodes.",
+ defaultValue = "false")
+ private boolean mostUsed;
+
+ @CommandLine.Option(names = {"-l", "--least-used"},
+ description = "Show the least used datanodes.",
+ defaultValue = "false")
+ private boolean leastUsed;
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]