This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 190d043bc90 HDDS-14059. Replace Preconditions.checkNotNull in hdds-container-service (#9411)
190d043bc90 is described below

commit 190d043bc902a05c12d51f1ea8d57183ad799910
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Fri Dec 5 07:40:09 2025 -0800

    HDDS-14059. Replace Preconditions.checkNotNull in hdds-container-service (#9411)
---
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   5 +-
 .../container/common/helpers/ContainerUtils.java   |  11 +-
 .../container/common/impl/ContainerDataYaml.java   |   4 +-
 .../ozone/container/common/impl/ContainerSet.java  |  11 +-
 .../container/common/impl/HddsDispatcher.java      |  17 +--
 .../common/impl/OpenContainerBlockMap.java         | 148 ---------------------
 .../ContainerDeletionChoosingPolicyTemplate.java   |   9 +-
 .../container/common/report/ReportManager.java     |   4 +-
 .../common/statemachine/StateContext.java          |  15 +--
 .../commandhandler/CommandDispatcher.java          |  18 +--
 .../SetNodeOperationalStateCommandHandler.java     |   4 +-
 .../states/endpoint/VersionEndpointTask.java       |   8 +-
 .../common/transport/server/XceiverServerGrpc.java |   4 +-
 .../server/ratis/ContainerStateMachine.java        |   4 +-
 .../container/common/volume/StorageVolume.java     |  13 +-
 .../container/keyvalue/KeyValueContainerData.java  |   3 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  33 +++--
 .../container/keyvalue/helpers/BlockUtils.java     |  28 ++--
 .../helpers/KeyValueContainerLocationUtil.java     |  17 +--
 .../keyvalue/helpers/KeyValueContainerUtil.java    |  11 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |  27 ++--
 .../keyvalue/impl/ChunkManagerDispatcher.java      |   8 +-
 .../keyvalue/impl/ChunkManagerDummyImpl.java       |   4 +-
 .../keyvalue/impl/FilePerBlockStrategy.java        |   8 +-
 .../keyvalue/impl/FilePerChunkStrategy.java        |   5 +-
 .../ozone/container/ozoneimpl/ContainerReader.java |   7 +-
 .../ScmHAFinalizeUpgradeActionDatanode.java        |   5 +-
 .../protocol/commands/CloseContainerCommand.java   |   4 +-
 .../protocol/commands/ClosePipelineCommand.java    |  10 +-
 .../protocol/commands/CreatePipelineCommand.java   |   4 +-
 .../protocol/commands/DeleteContainerCommand.java  |   4 +-
 .../commands/FinalizeNewLayoutVersionCommand.java  |   4 +-
 .../commands/ReconcileContainerCommand.java        |   3 +-
 .../commands/ReconstructECContainersCommand.java   |   3 +-
 .../commands/RefreshVolumeUsageCommand.java        |   4 +-
 .../commands/ReplicateContainerCommand.java        |   4 +-
 .../commands/SetNodeOperationalStateCommand.java   |   4 +-
 37 files changed, 154 insertions(+), 321 deletions(-)
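
The change swaps Guava's Preconditions.checkNotNull for the JDK's
java.util.Objects.requireNonNull across the module. A minimal sketch of
the pattern, with illustrative names not taken from the patch:

    import java.util.Objects;

    public class VolumeConfig {
      private final String clusterId;

      public VolumeConfig(String clusterId) {
        // Like Guava's checkNotNull, requireNonNull throws
        // NullPointerException with the supplied message; it also returns
        // its argument, so the check can be inlined into an assignment,
        // as StorageVolume.format() does below.
        this.clusterId = Objects.requireNonNull(clusterId, "clusterId == null");
      }
    }

Both calls throw NullPointerException on a null argument, so the swap
drops the Guava dependency from these checks without changing behavior.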

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 1d0e431c65c..3efa83a55cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -40,6 +40,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -428,7 +429,7 @@ private void unregisterMXBean() {
   private DatanodeDetails initializeDatanodeDetails()
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    Preconditions.checkNotNull(idFilePath);
+    Objects.requireNonNull(idFilePath, "idFilePath == null");
     File idFile = new File(idFilePath);
     DatanodeDetails details;
     if (idFile.exists()) {
@@ -453,7 +454,7 @@ private DatanodeDetails initializeDatanodeDetails()
   private void persistDatanodeDetails(DatanodeDetails dnDetails)
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    Preconditions.checkNotNull(idFilePath);
+    Objects.requireNonNull(idFilePath, "idFilePath == null");
     File idFile = new File(idFilePath);
     ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index e38a7666199..33d7dc9a324 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -27,7 +27,6 @@
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse;
 import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -35,6 +34,7 @@
 import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Objects;
 import java.util.UUID;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -105,13 +105,13 @@ public static ContainerCommandResponseProto logAndReturnError(
    * @return Name of the container.
    */
   public static String getContainerNameFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
+    Objects.requireNonNull(containerFile, "containerFile == null");
     return Paths.get(containerFile.getParent()).resolve(
         removeExtension(containerFile.getName())).toString();
   }
 
   public static long getContainerIDFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
+    Objects.requireNonNull(containerFile, "containerFile == null");
     String containerID = getContainerNameFromFile(containerFile);
     return Long.parseLong(containerID);
   }
@@ -123,9 +123,8 @@ public static long getContainerIDFromFile(File containerFile) {
    */
   public static void verifyIsNewContainer(File containerFile) throws
       FileAlreadyExistsException {
+    Objects.requireNonNull(containerFile, "containerFile == null");
     Logger log = LoggerFactory.getLogger(ContainerSet.class);
-    Preconditions.checkNotNull(containerFile, "containerFile Should not be " +
-        "null");
     if (containerFile.getParentFile().exists()) {
       log.error("Container already exists on disk. File: {}", containerFile
           .toPath());
@@ -259,7 +258,7 @@ public static File getContainerFile(File containerBaseDir) {
    */
   public static File getChunkDir(ContainerData containerData)
       throws StorageContainerException {
-    Preconditions.checkNotNull(containerData, "Container data can't be null");
+    Objects.requireNonNull(containerData, "containerData == null");
 
     String chunksPath = containerData.getChunksPath();
     if (chunksPath == null) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 14d4ec70ddb..ceacb49f289 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -20,7 +20,6 @@
 import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX;
 import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG;
 
-import com.google.common.base.Preconditions;
 import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
@@ -29,6 +28,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.TreeSet;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -87,7 +87,7 @@ public static void createContainerFile(ContainerData containerData, File contain
    */
   public static ContainerData readContainerFile(File containerFile)
       throws IOException {
-    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
+    Objects.requireNonNull(containerFile, "containerFile == null");
     try (InputStream inputFileStream = Files.newInputStream(containerFile.toPath())) {
       return readContainer(inputFileStream);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 97b958d42e5..912da45d25a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -82,7 +82,7 @@ public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
 
   public static ContainerSet newRwContainerSet(
       WitnessedContainerMetadataStore metadataStore, long recoveringTimeout) {
-    Objects.requireNonNull(metadataStore, "WitnessedContainerMetadataStore == null");
+    Objects.requireNonNull(metadataStore, "metadataStore == null");
     return new ContainerSet(metadataStore, recoveringTimeout);
   }
 
@@ -187,7 +187,7 @@ public void scanContainerWithoutGap(long containerID, String reasonForScan) {
    */
   private boolean addContainer(Container<?> container, boolean overwrite) throws
       StorageContainerException {
-    Preconditions.checkNotNull(container, "container cannot be null");
+    Objects.requireNonNull(container, "container == null");
 
     long containerId = container.getContainerData().getContainerID();
     State containerState = container.getContainerData().getState();
@@ -416,7 +416,7 @@ public Iterator<Map.Entry<Long, Long>> getRecoveringContainerIterator() {
    * @return {@literal Iterator<Container<?>>}
    */
   public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
-    Preconditions.checkNotNull(volume);
+    Objects.requireNonNull(volume, "volume == null");
     Iterator<Long> containerIdIterator = volume.getContainerIterator();
 
     List<Container<?>> containers = new ArrayList<>();
@@ -439,7 +439,7 @@ public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
    * @return number of containers
    */
   public long containerCount(HddsVolume volume) {
-    Preconditions.checkNotNull(volume);
+    Objects.requireNonNull(volume, "volume == null");
     return volume.getContainerCount();
   }
 
@@ -479,8 +479,7 @@ public Map<Long, Container<?>> getContainerMap() {
   public void listContainer(long startContainerId, long count,
                             List<ContainerData> data) throws
       StorageContainerException {
-    Preconditions.checkNotNull(data,
-        "Internal assertion: data cannot be null");
+    Objects.requireNonNull(data, "data == null");
     Preconditions.checkState(startContainerId >= 0,
         "Start container Id cannot be negative");
     Preconditions.checkState(count > 0,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index d9c82f1deaf..5dec84b0766 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -29,6 +29,7 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
 import java.util.TreeMap;
@@ -203,7 +204,7 @@ public ContainerCommandResponseProto dispatch(
   @SuppressWarnings("methodlength")
   private ContainerCommandResponseProto dispatchRequest(
       ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
-    Preconditions.checkNotNull(msg);
+    Objects.requireNonNull(msg, "msg == null");
     if (LOG.isTraceEnabled()) {
       LOG.trace("Command {}, trace ID: {} ", msg.getCmdType(),
           msg.getTraceID());
@@ -267,7 +268,7 @@ private ContainerCommandResponseProto dispatchRequest(
       // snapshot.
       // just add it to the list, and remove it from missing container set
       // as it might have been added in the list during "init".
-      Preconditions.checkNotNull(container2BCSIDMap);
+      Objects.requireNonNull(container2BCSIDMap, "container2BCSIDMap == null");
       if (container != null && container2BCSIDMap.get(containerID) == null) {
         container2BCSIDMap.put(
             containerID, container.getBlockCommitSequenceId());
@@ -428,8 +429,8 @@ && getMissingContainerSet().contains(containerID)) {
       }
       if (cmdType == Type.CreateContainer
           && result == Result.SUCCESS && dispatcherContext != null) {
-        Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap());
-        container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0));
+        Objects.requireNonNull(container2BCSIDMap, "container2BCSIDMap == null");
+        container2BCSIDMap.putIfAbsent(containerID, 0L);
       }
       if (result == Result.SUCCESS) {
         updateBCSID(container, dispatcherContext, cmdType);
@@ -467,12 +468,12 @@ private void updateBCSID(Container container,
       DispatcherContext dispatcherContext, Type cmdType) {
     if (dispatcherContext != null && (cmdType == Type.PutBlock
         || cmdType == Type.PutSmallFile)) {
-      Preconditions.checkNotNull(container);
+      Objects.requireNonNull(container, "container == null");
       long bcsID = container.getBlockCommitSequenceId();
       long containerId = container.getContainerData().getContainerID();
       Map<Long, Long> container2BCSIDMap;
       container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap();
-      Preconditions.checkNotNull(container2BCSIDMap);
+      Objects.requireNonNull(container2BCSIDMap, "container2BCSIDMap == null");
       Preconditions.checkArgument(container2BCSIDMap.containsKey(containerId));
       // updates the latest BCSID on every putBlock or putSmallFile
       // transaction over Ratis.
@@ -655,7 +656,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) {
 
   @Override
   public void setClusterId(String clusterId) {
-    Preconditions.checkNotNull(clusterId, "clusterId cannot be null");
+    Objects.requireNonNull(clusterId, "clusterId == null");
     if (this.clusterId == null) {
       this.clusterId = clusterId;
       for (Map.Entry<ContainerType, Handler> handlerMap : handlers.entrySet()) {
@@ -821,6 +822,7 @@ public StateMachine.DataChannel getStreamDataChannel(
   public void streamDataReadOnly(ContainerCommandRequestProto msg,
       StreamObserver<ContainerCommandResponseProto> streamObserver,
       RandomAccessFileChannel blockFile, DispatcherContext dispatcherContext) {
+    Objects.requireNonNull(msg, "msg == null");
     Type cmdType = msg.getCmdType();
     String traceID = msg.getTraceID();
     Span span = TracingUtil.importAndCreateSpan(cmdType.toString(), traceID);
@@ -828,7 +830,6 @@ public void streamDataReadOnly(ContainerCommandRequestProto msg,
     EventType eventType = getEventType(msg);
 
     try (UncheckedAutoCloseable ignored = protocolMetrics.measure(cmdType)) {
-      Preconditions.checkNotNull(msg);
       if (LOG.isTraceEnabled()) {
         LOG.trace("Command {}, trace ID: {}.", msg.getCmdType(), traceID);
       }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
deleted file mode 100644
index 6066199be7a..00000000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.function.Function;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-
-/**
- * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}).
- * The outer container map does not entail locking for a better performance.
- * The inner {@code BlockDataMap} is synchronized.
- *
- * This class will maintain list of open keys per container when closeContainer
- * command comes, it should autocommit all open keys of a open container before
- * marking the container as closed.
- */
-public class OpenContainerBlockMap {
-  /**
-   * TODO : We may construct the openBlockMap by reading the Block Layout
-   * for each block inside a container listing all chunk files and reading the
-   * sizes. This will help to recreate the openKeys Map once the DataNode
-   * restarts.
-   *
-   * For now, we will track all open blocks of a container in the blockMap.
-   */
-  private final ConcurrentMap<Long, BlockDataMap> containers = new ConcurrentHashMap<>();
-
-  /**
-   * Map: localId {@literal ->} BlockData.
-   *
-   * In order to support {@link #getAll()}, the update operations are
-   * synchronized.
-   */
-  static class BlockDataMap {
-    private final ConcurrentMap<Long, BlockData> blocks =
-        new ConcurrentHashMap<>();
-
-    BlockData get(long localId) {
-      return blocks.get(localId);
-    }
-
-    synchronized int removeAndGetSize(long localId) {
-      blocks.remove(localId);
-      return blocks.size();
-    }
-
-    synchronized BlockData computeIfAbsent(
-        long localId, Function<Long, BlockData> f) {
-      return blocks.computeIfAbsent(localId, f);
-    }
-
-    synchronized List<BlockData> getAll() {
-      return new ArrayList<>(blocks.values());
-    }
-  }
-
-  /**
-   * Removes the Container matching with specified containerId.
-   * @param containerId containerId
-   */
-  public void removeContainer(long containerId) {
-    Preconditions
-        .checkState(containerId >= 0, "Container Id cannot be negative.");
-    containers.remove(containerId);
-  }
-
-  public void addChunk(BlockID blockID, ChunkInfo info) {
-    Preconditions.checkNotNull(info);
-    containers.computeIfAbsent(blockID.getContainerID(),
-        id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(),
-          id -> new BlockData(blockID)).addChunk(info);
-  }
-
-  /**
-   * Removes the chunk from the chunkInfo list for the given block.
-   * @param blockID id of the block
-   * @param chunkInfo chunk info.
-   */
-  public void removeChunk(BlockID blockID, ChunkInfo chunkInfo) {
-    Preconditions.checkNotNull(chunkInfo);
-    Preconditions.checkNotNull(blockID);
-    Optional.ofNullable(containers.get(blockID.getContainerID()))
-        .map(blocks -> blocks.get(blockID.getLocalID()))
-        .ifPresent(keyData -> keyData.removeChunk(chunkInfo));
-  }
-
-  /**
-   * Returns the list of open blocks to the openContainerBlockMap.
-   * @param containerId container id
-   * @return List of open blocks
-   */
-  public List<BlockData> getOpenBlocks(long containerId) {
-    return Optional.ofNullable(containers.get(containerId))
-        .map(BlockDataMap::getAll)
-        .orElseGet(Collections::emptyList);
-  }
-
-  /**
-   * removes the block from the block map.
-   * @param blockID - block ID
-   */
-  public void removeFromBlockMap(BlockID blockID) {
-    Preconditions.checkNotNull(blockID);
-    containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ? null : blocks);
-  }
-
-  /**
-   * Returns true if the block exists in the map, false otherwise.
-   *
-   * @param blockID  - Block ID.
-   * @return True, if it exists, false otherwise
-   */
-  public boolean checkIfBlockExists(BlockID blockID) {
-    BlockDataMap keyDataMap = containers.get(blockID.getContainerID());
-    return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null;
-  }
-
-  @VisibleForTesting
-  BlockDataMap getBlockDataMap(long containerId) {
-    return containers.get(containerId);
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
index 775303a5aa7..04fb7dfc7f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
@@ -17,12 +17,11 @@
 
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import java.util.Objects;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -40,10 +39,8 @@ public abstract class ContainerDeletionChoosingPolicyTemplate
 
   @Override
   public final List<ContainerBlockInfo> chooseContainerForBlockDeletion(
-      int blockCount, Map<Long, ContainerData> candidateContainers)
-      throws StorageContainerException {
-    Preconditions.checkNotNull(candidateContainers,
-        "Internal assertion: candidate containers cannot be null");
+      int blockCount, Map<Long, ContainerData> candidateContainers) {
+    Objects.requireNonNull(candidateContainers, "candidateContainers == null");
 
     int originalBlockCount = blockCount;
     List<ContainerBlockInfo> result = new ArrayList<>();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
index 05588a8cfd7..9d93f79cd61 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
@@ -17,11 +17,11 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.Message;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -156,7 +156,7 @@ public Builder addThreadNamePrefix(String threadPrefix) {
      * @return {@link ReportManager}
      */
     public ReportManager build() {
-      Preconditions.checkNotNull(stateContext);
+      Objects.requireNonNull(stateContext, "stateContext == null");
       return new ReportManager(
           stateContext, reportPublishers, threadNamePrefix);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index a7ea469f0c8..24496525a56 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -25,7 +25,6 @@
 import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmInitialHeartbeatInterval;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.protobuf.Descriptors.Descriptor;
 import com.google.protobuf.Message;
 import java.io.IOException;
@@ -304,9 +303,9 @@ public void addIncrementalReport(Message report) {
       return;
     }
     final Descriptor descriptor = report.getDescriptorForType();
-    Preconditions.checkState(descriptor != null);
+    Objects.requireNonNull(descriptor, "descriptor == null");
     final String reportType = descriptor.getFullName();
-    Preconditions.checkState(reportType != null);
+    Objects.requireNonNull(reportType, "reportType == null");
     // in some case, we want to add a fullReportType message
     // as an incremental message.
     // see XceiverServerRatis#sendPipelineReport
@@ -327,9 +326,9 @@ public void refreshFullReport(Message report) {
       return;
     }
     final Descriptor descriptor = report.getDescriptorForType();
-    Preconditions.checkState(descriptor != null);
+    Objects.requireNonNull(descriptor, "descriptor == null");
     final String reportType = descriptor.getFullName();
-    Preconditions.checkState(reportType != null);
+    Objects.requireNonNull(reportType, "reportType == null");
     if (!fullReportTypeList.contains(reportType)) {
       throw new IllegalArgumentException(
           "not full report message type: " + reportType);
@@ -358,9 +357,9 @@ public void putBackReports(List<Message> reportsToPutBack,
     // We don't expect too much reports to be put back
     for (Message report : reportsToPutBack) {
       final Descriptor descriptor = report.getDescriptorForType();
-      Preconditions.checkState(descriptor != null);
+      Objects.requireNonNull(descriptor, "descriptor == null");
       final String reportType = descriptor.getFullName();
-      Preconditions.checkState(reportType != null);
+      Objects.requireNonNull(reportType, "reportType == null");
     }
     synchronized (incrementalReportsQueue) {
       if (incrementalReportsQueue.containsKey(endpoint)) {
@@ -522,7 +521,7 @@ public List<ContainerAction> getPendingContainerAction(
         int limit = size > maxLimit ? maxLimit : size;
         for (int count = 0; count < limit; count++) {
           ContainerAction action = actions.poll();
-          Preconditions.checkNotNull(action);
+          Objects.requireNonNull(action, "action == null");
           containerActionList.add(action);
         }
       }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index 482878e6f58..fd6e998fd16 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -23,6 +23,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics;
@@ -91,7 +92,7 @@ public ClosePipelineCommandHandler getClosePipelineCommandHandler() {
    * @param command - SCM Command.
    */
   public void handle(SCMCommand<?> command) {
-    Preconditions.checkNotNull(command);
+    Objects.requireNonNull(command, "command == null");
     CommandHandler handler = handlerMap.get(command.getType());
     if (handler != null) {
       commandHandlerMetrics.increaseCommandCount(command.getType());
@@ -153,7 +154,7 @@ public Builder() {
      * @return Builder
      */
     public Builder addHandler(CommandHandler handler) {
-      Preconditions.checkNotNull(handler);
+      Objects.requireNonNull(handler, "handler == null");
       handlerList.add(handler);
       return this;
     }
@@ -165,7 +166,7 @@ public Builder addHandler(CommandHandler handler) {
      * @return Builder
      */
     public Builder setContainer(OzoneContainer ozoneContainer) {
-      Preconditions.checkNotNull(ozoneContainer);
+      Objects.requireNonNull(ozoneContainer, "ozoneContainer == null");
       this.container = ozoneContainer;
       return this;
     }
@@ -178,7 +179,7 @@ public Builder setContainer(OzoneContainer ozoneContainer) {
      */
     public Builder setConnectionManager(SCMConnectionManager
         scmConnectionManager) {
-      Preconditions.checkNotNull(scmConnectionManager);
+      Objects.requireNonNull(scmConnectionManager, "scmConnectionManager == null");
       this.connectionManager = scmConnectionManager;
       return this;
     }
@@ -190,7 +191,7 @@ public Builder setConnectionManager(SCMConnectionManager
      * @return this
      */
     public Builder setContext(StateContext stateContext) {
-      Preconditions.checkNotNull(stateContext);
+      Objects.requireNonNull(stateContext, "stateContext == null");
       this.context = stateContext;
       return this;
     }
@@ -200,10 +201,9 @@ public Builder setContext(StateContext stateContext) {
      * @return Command Dispatcher.
      */
     public CommandDispatcher build() {
-      Preconditions.checkNotNull(this.connectionManager,
-          "Missing scm connection manager.");
-      Preconditions.checkNotNull(this.container, "Missing ozone container.");
-      Preconditions.checkNotNull(this.context, "Missing state context.");
+      Objects.requireNonNull(connectionManager, "connectionManager == null");
+      Objects.requireNonNull(container, "container == null");
+      Objects.requireNonNull(context, "context == null");
       Preconditions.checkArgument(!this.handlerList.isEmpty(),
           "The number of command handlers must be greater than 0.");
       return new CommandDispatcher(this.container, this.connectionManager,
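
The build() rewrite above keeps the builder's fail-fast validation, just
with JDK calls. A minimal sketch of the same idiom, assuming a
hypothetical ReportSender class that is not part of the patch:

    import java.util.Objects;

    final class ReportSender {
      private final String endpoint;

      private ReportSender(Builder b) {
        this.endpoint = b.endpoint;
      }

      String endpoint() {
        return endpoint;
      }

      static final class Builder {
        private String endpoint;

        Builder setEndpoint(String endpoint) {
          // Reject null at the setter, as the setXxx methods above do.
          this.endpoint = Objects.requireNonNull(endpoint, "endpoint == null");
          return this;
        }

        ReportSender build() {
          // Re-check required fields at build time, as
          // CommandDispatcher.Builder#build does.
          Objects.requireNonNull(endpoint, "endpoint == null");
          return new ReportSender(this);
        }
      }
    }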
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
index 4872efbd4b2..bffef663ba9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
@@ -17,9 +17,9 @@
 
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
+import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -119,7 +119,7 @@ private void persistUpdatedDatanodeDetails(
   private void persistDatanodeDetails(DatanodeDetails dnDetails)
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    Preconditions.checkNotNull(idFilePath);
+    Objects.requireNonNull(idFilePath, "idFilePath == null");
     File idFile = new File(idFilePath);
     ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile, conf);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 78002d27917..0f2b26b148d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -17,9 +17,9 @@
 
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.net.BindException;
+import java.util.Objects;
 import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
@@ -75,10 +75,8 @@ public EndpointStateMachine.EndPointStates call() throws Exception {
           String scmId = response.getValue(OzoneConsts.SCM_ID);
           String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
 
-          Preconditions.checkNotNull(scmId,
-              "Reply from SCM: scmId cannot be null");
-          Preconditions.checkNotNull(clusterId,
-              "Reply from SCM: clusterId cannot be null");
+          Objects.requireNonNull(scmId, "scmId == null");
+          Objects.requireNonNull(clusterId, "clusterId == null");
 
           // Check DbVolumes, format DbVolume at first register time.
           checkVolumeSet(ozoneContainer.getDbVolumeSet(), scmId, clusterId);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 93a342a95c1..e7d0d576188 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server;
 
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import io.opentelemetry.api.trace.Span;
 import io.opentelemetry.context.Scope;
@@ -25,6 +24,7 @@
 import java.net.BindException;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -85,7 +85,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
   public XceiverServerGrpc(DatanodeDetails datanodeDetails,
       ConfigurationSource conf,
       ContainerDispatcher dispatcher, CertificateClient caClient) {
-    Preconditions.checkNotNull(conf);
+    Objects.requireNonNull(conf, "conf == null");
 
     this.id = datanodeDetails.getID();
     this.datanodeDetails = datanodeDetails;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 1ec30914e2c..39525eb0893 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -878,9 +878,7 @@ private ByteString readStateMachineData(
     }
 
     // assert that the response has data in it.
-    Preconditions
-        .checkNotNull(data, "read chunk data is null for chunk: %s",
-            chunkInfo);
+    Objects.requireNonNull(data, () -> "data == null for " + TextFormat.shortDebugString(chunkInfo));
     Preconditions.checkState(data.size() == chunkInfo.getLen(),
         "read chunk len=%s does not match chunk expected len=%s for chunk:%s",
         data.size(), chunkInfo.getLen(), chunkInfo);
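
The replacement above uses the Supplier overload of requireNonNull, so
the message string is only built if the check actually fails. A minimal
sketch of that overload, with illustrative names:

    import java.util.Objects;

    final class ChunkChecks {
      static byte[] requireChunk(byte[] data, long chunkId) {
        // The lambda defers the string concatenation until the check
        // fails, keeping the happy path allocation-free.
        return Objects.requireNonNull(data,
            () -> "data == null for chunk " + chunkId);
      }
    }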
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 4da3b4648dc..e10ad3916da 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -194,9 +194,7 @@ public void setGatherContainerUsages(Function<HddsVolume, Long> gatherContainerU
   }
 
   public void format(String cid) throws IOException {
-    Preconditions.checkNotNull(cid, "clusterID cannot be null while " +
-        "formatting Volume");
-    this.clusterID = cid;
+    this.clusterID = Objects.requireNonNull(cid, "clusterID == null");
     initialize();
   }
 
@@ -343,12 +341,9 @@ private void createVersionFile() throws IOException {
   }
 
   private void writeVersionFile() throws IOException {
-    Preconditions.checkNotNull(this.storageID,
-        "StorageID cannot be null in Version File");
-    Preconditions.checkNotNull(this.clusterID,
-        "ClusterID cannot be null in Version File");
-    Preconditions.checkNotNull(this.datanodeUuid,
-        "DatanodeUUID cannot be null in Version File");
+    Objects.requireNonNull(storageID, "storageID == null");
+    Objects.requireNonNull(clusterID, "clusterID == null");
+    Objects.requireNonNull(datanodeUuid, "datanodeUuid == null");
     Preconditions.checkArgument(this.cTime > 0,
         "Creation Time should be positive");
     Preconditions.checkArgument(this.layoutVersion ==
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index e80655e0248..b7804a7e8b0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -45,6 +45,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -326,7 +327,7 @@ public void clearFinalizedBlock(DBHandle db) throws IOException {
     if (!finalizedBlockSet.isEmpty()) {
       // delete from db and clear memory
       // Should never fail.
-      Preconditions.checkNotNull(db, "DB cannot be null here");
+      Objects.requireNonNull(db, "db == null");
       try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) {
         db.getStore().getFinalizeBlocksTable().deleteBatchWithPrefix(batch, containerPrefix());
         db.getStore().getBatchHandler().commitBatchOperation(batch);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 6fc05b9f00a..04ad8287ec2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -65,7 +65,6 @@
 import static org.apache.ratis.util.Preconditions.assertTrue;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.Striped;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -87,6 +86,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
+import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.locks.Lock;
@@ -657,7 +657,7 @@ ContainerCommandResponseProto handlePutBlock(
 
       ContainerProtos.BlockData data = request.getPutBlock().getBlockData();
       BlockData blockData = BlockData.getFromProtoBuf(data);
-      Preconditions.checkNotNull(blockData);
+      Objects.requireNonNull(blockData, "blockData == null");
 
       boolean endOfBlock = false;
       if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) {
@@ -720,7 +720,7 @@ ContainerCommandResponseProto handleFinalizeBlock(
       checkContainerOpen(kvContainer);
       BlockID blockID = BlockID.getFromProtobuf(
           request.getFinalizeBlock().getBlockID());
-      Preconditions.checkNotNull(blockID);
+      Objects.requireNonNull(blockID, "blockID == null");
 
       LOG.info("Finalized Block request received {} ", blockID);
 
@@ -950,7 +950,7 @@ ContainerCommandResponseProto handleReadChunk(
           request.getReadChunk().getBlockID());
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk()
           .getChunkData());
-      Preconditions.checkNotNull(chunkInfo);
+      Objects.requireNonNull(chunkInfo, "chunkInfo == null");
       BlockUtils.verifyReplicaIdx(kvContainer, blockID);
       BlockUtils.verifyBCSId(kvContainer, blockID);
 
@@ -988,8 +988,7 @@ ContainerCommandResponseProto handleReadChunk(
           request);
     }
 
-    Preconditions.checkNotNull(data, "Chunk data is null");
-
+    Objects.requireNonNull(data, "data == null");
     return getReadChunkResponse(request, data, byteBufferToByteString);
   }
 
@@ -1047,7 +1046,7 @@ ContainerCommandResponseProto handleWriteChunk(
       ContainerProtos.ChunkInfo chunkInfoProto = writeChunk.getChunkData();
 
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
-      Preconditions.checkNotNull(chunkInfo);
+      Objects.requireNonNull(chunkInfo, "chunkInfo == null");
 
       ChunkBuffer data = null;
       if (dispatcherContext == null) {
@@ -1110,9 +1109,9 @@ ContainerCommandResponseProto handleWriteChunk(
   public void writeChunkForClosedContainer(ChunkInfo chunkInfo, BlockID blockID,
                                            ChunkBuffer data, KeyValueContainer kvContainer)
       throws IOException {
-    Preconditions.checkNotNull(kvContainer);
-    Preconditions.checkNotNull(chunkInfo);
-    Preconditions.checkNotNull(data);
+    Objects.requireNonNull(kvContainer, "kvContainer == null");
+    Objects.requireNonNull(chunkInfo, "chunkInfo == null");
+    Objects.requireNonNull(data, "data == null");
     long writeChunkStartTime = Time.monotonicNowNanos();
     if (!checkContainerClose(kvContainer)) {
       throw new IOException("Container #" + kvContainer.getContainerData().getContainerID() +
@@ -1141,8 +1140,8 @@ public void writeChunkForClosedContainer(ChunkInfo chunkInfo, BlockID blockID,
   public void putBlockForClosedContainer(KeyValueContainer kvContainer, 
BlockData blockData,
                                          long blockCommitSequenceId, boolean 
overwriteBscId)
       throws IOException {
-    Preconditions.checkNotNull(kvContainer);
-    Preconditions.checkNotNull(blockData);
+    Objects.requireNonNull(kvContainer, "kvContainer == null");
+    Objects.requireNonNull(blockData, "blockData == null");
     long startTime = Time.monotonicNowNanos();
 
     if (!checkContainerClose(kvContainer)) {
@@ -1186,11 +1185,11 @@ ContainerCommandResponseProto handlePutSmallFile(
 
       BlockData blockData = BlockData.getFromProtoBuf(
           putSmallFileReq.getBlock().getBlockData());
-      Preconditions.checkNotNull(blockData);
+      Objects.requireNonNull(blockData, "blockData == null");
 
       ContainerProtos.ChunkInfo chunkInfoProto = putSmallFileReq.getChunkInfo();
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
-      Preconditions.checkNotNull(chunkInfo);
+      Objects.requireNonNull(chunkInfo, "chunkInfo == null");
 
       ChunkBuffer data = ChunkBuffer.wrap(
           putSmallFileReq.getData().asReadOnlyByteBufferList());
@@ -1356,8 +1355,8 @@ public Container importContainer(ContainerData originalContainerData,
       final InputStream rawContainerStream,
       final TarContainerPacker packer)
       throws IOException {
-    Preconditions.checkState(originalContainerData instanceof
-        KeyValueContainerData, "Should be KeyValueContainerData instance");
+    assertTrue(originalContainerData instanceof KeyValueContainerData,
+        () -> "Expected KeyValueContainerData but " + 
originalContainerData.getClass());
 
     KeyValueContainerData containerData = new KeyValueContainerData(
         (KeyValueContainerData) originalContainerData);
@@ -2251,7 +2250,7 @@ private boolean logBlocksFoundOnDisk(Container container) throws IOException {
     // List files left over
     File chunksPath = new
         File(container.getContainerData().getChunksPath());
-    Preconditions.checkArgument(chunksPath.isDirectory());
+    assertTrue(chunksPath.isDirectory(), () -> chunksPath + " is not a directory");
     boolean notEmpty = false;
     try (DirectoryStream<Path> dir
              = Files.newDirectoryStream(chunksPath.toPath())) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 7dc02dffbb8..87a1328ba8a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -29,6 +29,7 @@
 import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
+import java.util.Objects;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -119,18 +120,17 @@ public static DatanodeStore getUncachedDatanodeStore(
    */
   public static DBHandle getDB(KeyValueContainerData containerData,
       ConfigurationSource conf) throws StorageContainerException {
-    Preconditions.checkNotNull(containerData);
-    Preconditions.checkNotNull(containerData.getDbFile());
-
-    String containerDBPath = containerData.getDbFile().getAbsolutePath();
+    Objects.requireNonNull(containerData, "containerData == null");
+    final File dbFile = Objects.requireNonNull(containerData.getDbFile(), "dbFile == null");
+    final String containerDBPath = dbFile.getAbsolutePath();
     try {
       if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
         DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
-        Preconditions.checkNotNull(cache);
+        Objects.requireNonNull(cache, "cache == null");
         return cache.getDB(containerDBPath, conf);
       } else {
         ContainerCache cache = ContainerCache.getInstance(conf);
-        Preconditions.checkNotNull(cache);
+        Objects.requireNonNull(cache, "cache == null");
         return cache.getDB(containerData.getContainerID(), containerData
                 .getContainerDBType(), containerDBPath,
             containerData.getSchemaVersion(), conf);
@@ -152,12 +152,12 @@ public static DBHandle getDB(KeyValueContainerData containerData,
    */
   public static void removeDB(KeyValueContainerData container,
       ConfigurationSource conf) {
-    Preconditions.checkNotNull(container);
-    Preconditions.checkNotNull(container.getDbFile());
+    Objects.requireNonNull(container, "container == null");
+    Objects.requireNonNull(container.getDbFile(), "dbFile == null");
     Preconditions.checkState(!container.hasSchema(OzoneConsts.SCHEMA_V3));
 
     ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
+    Objects.requireNonNull(cache, "cache == null");
     cache.removeDB(container.getDbFile().getAbsolutePath());
   }
 
@@ -183,11 +183,11 @@ public static void addDB(DatanodeStore store, String containerDBPath,
       ConfigurationSource conf, String schemaVersion) {
     if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) {
       DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
-      Preconditions.checkNotNull(cache);
+      Objects.requireNonNull(cache, "cache == null");
       cache.addDB(containerDBPath, new RawDB(store, containerDBPath));
     } else {
       ContainerCache cache = ContainerCache.getInstance(conf);
-      Preconditions.checkNotNull(cache);
+      Objects.requireNonNull(cache, "cache == null");
       cache.addDB(containerDBPath,
           new ReferenceCountedDB(store, containerDBPath));
     }
@@ -221,10 +221,8 @@ public static BlockData getBlockData(byte[] bytes) throws IOException {
   public static void verifyBCSId(Container container, BlockID blockID)
       throws IOException {
     long bcsId = blockID.getBlockCommitSequenceId();
-    Preconditions.checkNotNull(blockID,
-        "BlockID cannot be null");
-    Preconditions.checkNotNull(container,
-        "Container cannot be null");
+    Objects.requireNonNull(blockID, "blockID == null");
+    Objects.requireNonNull(container, "container == null");
 
     long containerBCSId = container.getBlockCommitSequenceId();
     if (containerBCSId < bcsId) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index 70026f5f8e0..cce7d3d6b3b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -19,6 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import java.io.File;
+import java.util.Objects;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -95,8 +96,8 @@ public static File getChunksLocationPath(String containerBaseDir) {
   public static String getBaseContainerLocation(String hddsVolumeDir,
                                                  String clusterId,
                                                  long containerId) {
-    Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
-    Preconditions.checkNotNull(clusterId, "scmUuid cannot be null");
+    Objects.requireNonNull(hddsVolumeDir, "hddsVolumeDir == null");
+    Objects.requireNonNull(clusterId, "clusterId == null");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
 
@@ -124,13 +125,13 @@ private static String getContainerSubDirectory(long containerId) {
    */
   public static File getContainerDBFile(KeyValueContainerData containerData) {
     if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
-      Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null");
-      return new File(containerData.getVolume().getDbParentDir(),
-          OzoneConsts.CONTAINER_DB_NAME);
+      final File dbParentDir = containerData.getVolume().getDbParentDir();
+      Objects.requireNonNull(dbParentDir, "dbParentDir == null");
+      return new File(dbParentDir, OzoneConsts.CONTAINER_DB_NAME);
     }
-    Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null");
-    return new File(containerData.getMetadataPath(), containerData.getContainerID() +
-        OzoneConsts.DN_CONTAINER_DB);
+    final String metadataPath = containerData.getMetadataPath();
+    Objects.requireNonNull(metadataPath, "metadataPath == null");
+    return new File(metadataPath, containerData.getContainerID() + OzoneConsts.DN_CONTAINER_DB);
   }
 
 }
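
The getContainerDBFile rewrite above also pulls each getter result into
a local variable before the null check, so the checked value and the
value used afterwards are guaranteed to be the same reference. A sketch
of the idiom, with hypothetical names:

    import java.io.File;
    import java.util.Objects;

    final class DbFiles {
      static File resolve(ContainerPaths paths) {
        // Check and use a single snapshot of the getter's value.
        final String metadataPath =
            Objects.requireNonNull(paths.getMetadataPath(), "metadataPath == null");
        return new File(metadataPath, "container.db");
      }
    }

    interface ContainerPaths {
      String getMetadataPath();
    }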
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 1dc699b2d2e..b6ac2085d91 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -27,6 +27,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.Objects;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -81,8 +82,8 @@ private KeyValueContainerUtil() {
   public static void createContainerMetaData(
       File containerMetaDataPath, File chunksPath, File dbFile,
       String schemaVersion, ConfigurationSource conf) throws IOException {
-    Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(conf);
+    Objects.requireNonNull(containerMetaDataPath, "containerMetaDataPath == null");
+    Objects.requireNonNull(conf, "conf == null");
 
     if (!containerMetaDataPath.mkdirs()) {
       LOG.error("Unable to create directory for metadata storage. Path: {}",
@@ -130,7 +131,7 @@ public static void createContainerMetaData(
   public static void removeContainer(
       KeyValueContainerData containerData, ConfigurationSource conf)
       throws IOException {
-    Preconditions.checkNotNull(containerData);
+    Objects.requireNonNull(containerData, "containerData == null");
     KeyValueContainerUtil.removeContainerDB(containerData, conf);
     KeyValueContainerUtil.moveToDeletedContainerDir(containerData,
         containerData.getVolume());
@@ -175,8 +176,8 @@ public static boolean noBlocksInContainer(DatanodeStore store,
                                             containerData,
                                             boolean bCheckChunksFilePath)
       throws IOException {
-    Preconditions.checkNotNull(store);
-    Preconditions.checkNotNull(containerData);
+    Objects.requireNonNull(store, "store == null");
+    Objects.requireNonNull(containerData, "containerData == null");
     if (containerData.isOpen()) {
       return false;
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index a8fe877e9ea..62dbcbe808e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -55,7 +56,6 @@ public class BlockManagerImpl implements BlockManager {
 
   private ConfigurationSource config;
 
-  private static final String DB_NULL_ERR_MSG = "DB cannot be null here";
   public static final String FULL_CHUNK = "full";
 
   // Default Read Buffer capacity when Checksum is not present
@@ -70,7 +70,7 @@ public class BlockManagerImpl implements BlockManager {
    * @param conf - Ozone configuration
    */
   public BlockManagerImpl(ConfigurationSource conf) {
-    Preconditions.checkNotNull(conf, "Config cannot be null");
+    Objects.requireNonNull(conf, "conf == null");
     this.config = conf;
     this.defaultReadBufferCapacity = config.getBufferSize(
         ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY,
@@ -105,7 +105,7 @@ public long putBlock(Container container, BlockData data,
   @Override
   public long putBlockForClosedContainer(Container container, BlockData data, boolean overwriteBcsId)
           throws IOException {
-    Preconditions.checkNotNull(data, "BlockData cannot be null for put operation.");
+    Objects.requireNonNull(data, "data == null");
     Preconditions.checkState(data.getContainerID() >= 0, "Container Id cannot be negative");
 
     KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
@@ -113,7 +113,7 @@ public long putBlockForClosedContainer(Container container, BlockData data, bool
     // We are not locking the key manager since RocksDB serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
     try (DBHandle db = BlockUtils.getDB(containerData, config)) {
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
 
       long blockBcsID = data.getBlockCommitSequenceId();
       long containerBcsID = containerData.getBlockCommitSequenceId();
@@ -174,8 +174,7 @@ public long putBlockForClosedContainer(Container container, BlockData data, bool
   public long persistPutBlock(KeyValueContainer container,
       BlockData data, boolean endOfBlock)
       throws IOException {
-    Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
-        "operation.");
+    Objects.requireNonNull(data, "data == null");
     Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
         "cannot be negative");
 
@@ -186,7 +185,7 @@ public long persistPutBlock(KeyValueContainer container,
     try (DBHandle db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
 
       long bcsId = data.getBlockCommitSequenceId();
       long containerBCSId = containerData.getBlockCommitSequenceId();
@@ -293,12 +292,12 @@ public long persistPutBlock(KeyValueContainer container,
   @Override
   public void finalizeBlock(Container container, BlockID blockId)
       throws IOException {
-    Preconditions.checkNotNull(blockId, "blockId cannot " +
-        "be null for finalizeBlock operation.");
+    Objects.requireNonNull(blockId, "blockId == null");
     Preconditions.checkState(blockId.getContainerID() >= 0,
         "Container Id cannot be negative");
 
     KeyValueContainer kvContainer = (KeyValueContainer)container;
+
     long localID = blockId.getLocalID();
 
     kvContainer.removeFromPendingPutBlockCache(localID);
@@ -306,7 +305,7 @@ public void finalizeBlock(Container container, BlockID blockId)
     try (DBHandle db = BlockUtils.getDB(kvContainer.getContainerData(),
         config)) {
       // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
 
       // persist finalizeBlock
       try (BatchOperation batch = db.getStore().getBatchHandler()
@@ -343,7 +342,7 @@ public BlockData getBlock(Container container, BlockID blockID) throws IOExcepti
     try (DBHandle db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
       BlockData blockData = getBlockByID(db, blockID, containerData);
       long id = blockData.getBlockID().getBlockCommitSequenceId();
       if (id < bcsId) {
@@ -363,7 +362,7 @@ public long getCommittedBlockLength(Container container, BlockID blockID)
     try (DBHandle db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
       BlockData blockData = getBlockByID(db, blockID, containerData);
       return blockData.getSize();
     }
@@ -409,7 +408,7 @@ public void deleteBlock(Container container, BlockID blockID) throws
   @Override
   public List<BlockData> listBlock(Container container, long startLocalID, int
       count) throws IOException {
-    Preconditions.checkNotNull(container, "container cannot be null");
+    Objects.requireNonNull(container, "container == null");
     Preconditions.checkState(startLocalID >= 0 || startLocalID == -1,
         "startLocal ID cannot be negative");
     Preconditions.checkArgument(count > 0,
@@ -442,7 +441,7 @@ public boolean blockExists(Container container, BlockID blockID) throws IOExcept
     try (DBHandle db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+      Objects.requireNonNull(db, "db == null");
       String blockKey = containerData.getBlockKey(blockID.getLocalID());
       return db.getStore().getBlockDataTable().isExist(blockKey);
     }
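
Beyond the mechanical swap, the hunks above also delete the shared DB_NULL_ERR_MSG constant in favor of the uniform "db == null" literal at each call site. The recurring shape, reduced to a standalone sketch (DBHandle and getDB here are hypothetical stand-ins for the real Ozone types):

    import java.util.Objects;

    final class DbHandleSketch {
      interface DBHandle extends AutoCloseable {
        @Override
        void close(); // narrowed to throw nothing, keeping try-with-resources clean
      }

      static DBHandle getDB() {
        return () -> { }; // stand-in for BlockUtils.getDB(containerData, config)
      }

      static void pattern() {
        try (DBHandle db = getDB()) {
          // Post condition carried over from the hunks above: getDB never returns null.
          Objects.requireNonNull(db, "db == null");
        }
      }
    }
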
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index 5f37d186b1c..a83306ff79b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -21,11 +21,11 @@
 import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
 
-import com.google.common.base.Preconditions;
 import jakarta.annotation.Nonnull;
 import java.io.IOException;
 import java.util.EnumMap;
 import java.util.Map;
+import java.util.Objects;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
@@ -107,7 +107,7 @@ public ChunkBufferToByteString readChunk(Container container, BlockID blockID,
     final ChunkBufferToByteString data = selectHandler(container)
         .readChunk(container, blockID, info, dispatcherContext);
 
-    Preconditions.checkState(data != null);
+    Objects.requireNonNull(data, "data == null");
     container.getContainerData().getStatistics().updateRead(info.getLen());
 
     return data;
@@ -117,7 +117,7 @@ public ChunkBufferToByteString readChunk(Container container, BlockID blockID,
   public void deleteChunk(Container container, BlockID blockID, ChunkInfo info)
       throws StorageContainerException {
 
-    Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+    Objects.requireNonNull(blockID, "blockID == null");
 
     // Delete the chunk from disk.
     // Do not decrement the ContainerData counters (usedBytes) here as it
@@ -131,7 +131,7 @@ public void deleteChunk(Container container, BlockID blockID, ChunkInfo info)
   public void deleteChunks(Container container, BlockData blockData)
       throws StorageContainerException {
 
-    Preconditions.checkNotNull(blockData, "Block data cannot be null.");
+    Objects.requireNonNull(blockData, "blockData == null");
 
     // Delete the chunks belonging to blockData.
     // Do not decrement the ContainerData counters (usedBytes) here as it
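
One change in this file is not a pure rename: readChunk previously guarded the handler result with Preconditions.checkState(data != null), which throws IllegalStateException, whereas Objects.requireNonNull throws NullPointerException. The difference, in isolation (standalone sketch, not Ozone code):

    import com.google.common.base.Preconditions;
    import java.util.Objects;

    final class ExceptionTypeSketch {
      static void before(Object data) {
        Preconditions.checkState(data != null); // IllegalStateException when data is null
      }

      static void after(Object data) {
        Objects.requireNonNull(data, "data == null"); // NullPointerException when data is null
      }
    }
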
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
index ce92f7c2ada..3b9289fe61b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
@@ -19,8 +19,8 @@
 
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 
-import com.google.common.base.Preconditions;
 import java.nio.ByteBuffer;
+import java.util.Objects;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
@@ -45,7 +45,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
       ChunkBuffer data, DispatcherContext dispatcherContext)
       throws StorageContainerException {
 
-    Preconditions.checkNotNull(dispatcherContext);
+    Objects.requireNonNull(dispatcherContext, "dispatcherContext == null");
     DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
 
     ContainerData containerData = container.getContainerData();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 4047b2535c0..36ebdc5aa82 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -38,6 +38,7 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.time.Duration;
+import java.util.Objects;
 import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -125,7 +126,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
 
     checkLayoutVersion(container);
 
-    Preconditions.checkNotNull(dispatcherContext);
+    Objects.requireNonNull(dispatcherContext, "dispatcherContext == null");
     DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
 
     if (info.getLen() <= 0) {
@@ -274,7 +275,7 @@ private void deleteChunk(Container container, BlockID blockID,
       throws StorageContainerException {
     checkLayoutVersion(container);
 
-    Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+    Objects.requireNonNull(blockID, "blockID == null");
 
     final File file = getChunkFile(container, blockID);
 
@@ -287,8 +288,6 @@ private void deleteChunk(Container container, BlockID blockID,
     }
 
     if (verifyLength) {
-      Preconditions.checkNotNull(info, "Chunk info cannot be null for single " +
-          "chunk delete");
       checkFullDelete(info, file);
     }
 
@@ -302,6 +301,7 @@ private static File getChunkFile(Container container, BlockID blockID) throws St
 
   private static void checkFullDelete(ChunkInfo info, File chunkFile)
       throws StorageContainerException {
+    Objects.requireNonNull(info, "info == null");
     long fileLength = chunkFile.length();
     if ((info.getOffset() > 0) || (info.getLen() != fileLength)) {
       String msg = String.format(
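
The last two hunks above also relocate the null check on info: deleteChunk no longer validates it inside the verifyLength branch, and checkFullDelete asserts it at the point of first use instead. The pattern, sketched with hypothetical names:

    import java.util.Objects;

    final class CalleeCheckSketch {
      interface Info { long len(); }

      static void deleteChunk(Info info, boolean verifyLength) {
        if (verifyLength) {
          checkFullDelete(info); // no separate null check needed at the call site
        }
      }

      static void checkFullDelete(Info info) {
        Objects.requireNonNull(info, "info == null"); // validated where first dereferenced
        System.out.println(info.len());
      }
    }
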
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 4cd1b1ff560..a36d8471a8f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -30,6 +30,7 @@
 import java.nio.file.StandardCopyOption;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -108,7 +109,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
 
     checkLayoutVersion(container);
 
-    Preconditions.checkNotNull(dispatcherContext);
+    Objects.requireNonNull(dispatcherContext, "dispatcherContext == null");
     DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
     try {
       KeyValueContainer kvContainer = (KeyValueContainer) container;
@@ -303,7 +304,7 @@ public void deleteChunk(Container container, BlockID blockID, ChunkInfo info)
 
     checkLayoutVersion(container);
 
-    Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+    Objects.requireNonNull(blockID, "blockID == null");
     KeyValueContainer kvContainer = (KeyValueContainer) container;
 
     // In version1, we have only chunk file.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 5e77462ddd9..a89a4958aa7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -21,9 +21,9 @@
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.DELETED;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
+import java.util.Objects;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -82,7 +82,7 @@ public class ContainerReader implements Runnable {
   public ContainerReader(
       MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset,
       ConfigurationSource conf, boolean shouldDelete) {
-    Preconditions.checkNotNull(volume);
+    Objects.requireNonNull(volume, "volume == null");
     this.hddsVolume = volume;
     this.hddsVolumeDir = hddsVolume.getHddsRootDir();
     this.containerSet = cset;
@@ -103,8 +103,7 @@ public void run() {
   }
 
   public void readVolume(File hddsVolumeRootDir) {
-    Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
-        "cannot be null");
+    Objects.requireNonNull(hddsVolumeRootDir, "hddsVolumeRootDir == null");
 
     //filtering storage directory
     File[] storageDirs = hddsVolumeRootDir.listFiles(File::isDirectory);
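
As a side effect, the readVolume hunk fixes a latent message bug: the removed concatenation "hddsVolumeRootDir" + "cannot be null" had no separating space, so the old failure text would have read "hddsVolumeRootDircannot be null". A quick demonstration:

    public final class MessageBugDemo {
      public static void main(String[] args) {
        // The pre-patch message, concatenated exactly as in the removed lines:
        System.out.println("hddsVolumeRootDir" + "cannot be null");
        // prints: hddsVolumeRootDircannot be null
      }
    }
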
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ScmHAFinalizeUpgradeActionDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ScmHAFinalizeUpgradeActionDatanode.java
index cdd01ad7000..fb9da4bd823 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ScmHAFinalizeUpgradeActionDatanode.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ScmHAFinalizeUpgradeActionDatanode.java
@@ -21,11 +21,11 @@
 import static org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
 import static org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.Objects;
 import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -70,8 +70,7 @@ public void execute(DatanodeStateMachine dsm) throws Exception {
    * @return true if the volume upgrade succeeded, false otherwise.
    */
   public static boolean upgradeVolume(StorageVolume volume, String clusterID) {
-    Preconditions.checkNotNull(clusterID, "Cannot upgrade volume with null " +
-        "cluster ID");
+    Objects.requireNonNull(clusterID, "clusterID == null");
     File hddsVolumeDir = volume.getStorageDir();
     File clusterIDDir = new File(hddsVolumeDir, clusterID);
     File[] storageDirs = volume.getStorageDir().listFiles(File::isDirectory);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index 6bd5adb688b..639264a6390 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
@@ -67,7 +67,7 @@ public CloseContainerCommandProto getProto() {
 
   public static CloseContainerCommand getFromProtobuf(
       CloseContainerCommandProto closeContainerProto) {
-    Preconditions.checkNotNull(closeContainerProto);
+    Objects.requireNonNull(closeContainerProto, "closeContainerProto == null");
     return new CloseContainerCommand(closeContainerProto.getCmdId(),
         PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()),
         closeContainerProto.getForce());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ClosePipelineCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ClosePipelineCommand.java
index 0c1caa3dd68..f1f811daf24 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ClosePipelineCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ClosePipelineCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@@ -59,11 +59,9 @@ public ClosePipelineCommandProto getProto() {
     return builder.build();
   }
 
-  public static ClosePipelineCommand getFromProtobuf(
-      ClosePipelineCommandProto createPipelineProto) {
-    Preconditions.checkNotNull(createPipelineProto);
-    return new ClosePipelineCommand(createPipelineProto.getCmdId(),
-        PipelineID.getFromProtobuf(createPipelineProto.getPipelineID()));
+  public static ClosePipelineCommand getFromProtobuf(ClosePipelineCommandProto proto) {
+    Objects.requireNonNull(proto, "proto == null");
+    return new ClosePipelineCommand(proto.getCmdId(), PipelineID.getFromProtobuf(proto.getPipelineID()));
   }
 
   public PipelineID getPipelineID() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CreatePipelineCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CreatePipelineCommand.java
index ebd5bcfdcc3..1c434756202 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CreatePipelineCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CreatePipelineCommand.java
@@ -17,10 +17,10 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -124,7 +124,7 @@ public CreatePipelineCommandProto getProto() {
 
   public static CreatePipelineCommand getFromProtobuf(
       CreatePipelineCommandProto createPipelineProto) {
-    Preconditions.checkNotNull(createPipelineProto);
+    Objects.requireNonNull(createPipelineProto, "createPipelineProto == null");
     return new CreatePipelineCommand(createPipelineProto.getCmdId(),
         PipelineID.getFromProtobuf(createPipelineProto.getPipelineID()),
         createPipelineProto.getType(), createPipelineProto.getFactor(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
index 193c67576c2..76ff272e0e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeleteContainerCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -88,7 +88,7 @@ public boolean isForce() {
 
   public static DeleteContainerCommand getFromProtobuf(
       DeleteContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
+    Objects.requireNonNull(protoMessage, "protoMessage == null");
 
     DeleteContainerCommand cmd =
         new DeleteContainerCommand(protoMessage.getContainerID(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java
index 0d5c02202dd..4d284171b20 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.FinalizeNewLayoutVersionCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -67,7 +67,7 @@ public FinalizeNewLayoutVersionCommandProto getProto() {
 
   public static  FinalizeNewLayoutVersionCommand getFromProtobuf(
       FinalizeNewLayoutVersionCommandProto finalizeProto) {
-    Preconditions.checkNotNull(finalizeProto);
+    Objects.requireNonNull(finalizeProto, "finalizeProto == null");
     return new FinalizeNewLayoutVersionCommand(
         finalizeProto.getFinalizeNewLayoutVersion(),
         finalizeProto.getDataNodeLayoutVersion(), finalizeProto.getCmdId());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconcileContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconcileContainerCommand.java
index f7879a33645..6aad68cd877 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconcileContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconcileContainerCommand.java
@@ -19,7 +19,6 @@
 
 import static java.util.Collections.emptySet;
 
-import com.google.common.base.Preconditions;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
@@ -66,7 +65,7 @@ public long getContainerID() {
   }
 
   public static ReconcileContainerCommand getFromProtobuf(ReconcileContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
+    Objects.requireNonNull(protoMessage, "protoMessage == null");
 
     List<HddsProtos.DatanodeDetailsProto> peers = protoMessage.getPeersList();
     Set<DatanodeDetails> peerNodes = !peers.isEmpty()
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
index 3628c2c9e6a..59f606608d0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import java.util.Arrays;
 import java.util.List;
@@ -89,7 +88,7 @@ public ReconstructECContainersCommandProto getProto() {
 
   public static ReconstructECContainersCommand getFromProtobuf(
       ReconstructECContainersCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
+    Objects.requireNonNull(protoMessage, "protoMessage == null");
 
     List<DatanodeDetailsAndReplicaIndex> srcDatanodeDetails =
         protoMessage.getSourcesList().stream()
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java
index 6872b152e15..deceeb9c8b6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.RefreshVolumeUsageCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 
@@ -51,7 +51,7 @@ public RefreshVolumeUsageCommandProto getProto() {
 
   public static RefreshVolumeUsageCommand getFromProtobuf(
       RefreshVolumeUsageCommandProto refreshVolumeUsageProto) {
-    Preconditions.checkNotNull(refreshVolumeUsageProto);
+    Objects.requireNonNull(refreshVolumeUsageProto, "refreshVolumeUsageProto == null");
     return new RefreshVolumeUsageCommand();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
index 826af4fdd36..8574909b386 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
@@ -19,8 +19,8 @@
 
 import static java.util.Collections.emptyList;
 
-import com.google.common.base.Preconditions;
 import java.util.List;
+import java.util.Objects;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
@@ -110,7 +110,7 @@ public ReplicateContainerCommandProto getProto() {
 
   public static ReplicateContainerCommand getFromProtobuf(
       ReplicateContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
+    Objects.requireNonNull(protoMessage, "protoMessage == null");
 
     List<DatanodeDetailsProto> sources = protoMessage.getSourcesList();
     List<DatanodeDetails> sourceNodes = !sources.isEmpty()
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
index 51aaf87ae34..d042369cf70 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.protocol.commands;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto;
@@ -80,7 +80,7 @@ public long getStateExpiryEpochSeconds() {
 
   public static SetNodeOperationalStateCommand getFromProtobuf(
       SetNodeOperationalStateCommandProto cmdProto) {
-    Preconditions.checkNotNull(cmdProto);
+    Objects.requireNonNull(cmdProto, "cmdProto == null");
     return new SetNodeOperationalStateCommand(cmdProto.getCmdId(),
         cmdProto.getNodeOperationalState(),
         cmdProto.getStateExpiryEpochSeconds());
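
Since each getFromProtobuf above now names its parameter in the failure message, the new behavior is easy to pin down in a test. A sketch with JUnit 5 (test class hypothetical, not part of this commit):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
    import org.junit.jupiter.api.Test;

    class GetFromProtobufNullTest {
      @Test
      void rejectsNullProto() {
        // The message matches the literal passed to Objects.requireNonNull above.
        NullPointerException e = assertThrows(NullPointerException.class,
            () -> SetNodeOperationalStateCommand.getFromProtobuf(null));
        assertEquals("cmdProto == null", e.getMessage());
      }
    }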


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

