This is an automated email from the ASF dual-hosted git repository.

dineshc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 9f75e5a  HDDS-2378 - Change OZONE as string used in the code where OzoneConsts.OZONE is suitable (#103)
9f75e5a is described below

commit 9f75e5ab49f5ed6b5aaaca2acab840e365b69522
Author: Istvan Fajth <pi...@cloudera.com>
AuthorDate: Wed Oct 30 15:58:47 2019 +0100

    HDDS-2378 - Change OZONE as string used in the code where OzoneConsts.OZONE is suitable (#103)
    
    * HDDS-2378 - Change OZONE as string used in the code where OzoneConsts.OZONE is suitable
---
 .../common/impl/TestContainerDataYaml.java         | 21 ++++---
 .../container/keyvalue/TestBlockManagerImpl.java   | 11 ++--
 .../container/keyvalue/TestKeyValueContainer.java  | 12 ++--
 .../metrics/SCMContainerManagerMetrics.java        |  3 +-
 .../algorithms/SCMContainerPlacementMetrics.java   |  3 +-
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |  3 +-
 .../hdds/scm/pipeline/SCMPipelineMetrics.java      |  3 +-
 .../hdds/scm/server/SCMContainerMetrics.java       |  3 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java    | 27 +++++----
 .../container/TestCloseContainerEventHandler.java  |  5 +-
 .../scm/container/TestSCMContainerManager.java     | 15 ++---
 .../hdds/scm/node/TestContainerPlacement.java      |  3 +-
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java |  3 +-
 .../TestContainerStateManagerIntegration.java      | 69 ++++++++++++----------
 .../metrics/TestSCMContainerManagerMetrics.java    |  6 +-
 .../hadoop/ozone/TestContainerOperations.java      |  2 +-
 .../TestContainerStateMachineIdempotency.java      |  3 +-
 .../hadoop/ozone/TestStorageContainerManager.java  |  4 +-
 .../ozone/client/rpc/Test2WayCommitInRatis.java    |  4 +-
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |  5 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  2 +-
 .../ozone/client/rpc/TestWatchForCommit.java       | 10 ++--
 .../apache/hadoop/ozone/ozShell/TestS3Shell.java   |  4 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java    |  4 +-
 .../hadoop/ozone/scm/TestContainerSmallFile.java   | 10 ++--
 .../scm/TestGetCommittedBlockLengthAndPutKey.java  |  8 +--
 .../hadoop/ozone/scm/TestXceiverClientManager.java | 22 ++++---
 .../hadoop/ozone/scm/TestXceiverClientMetrics.java |  8 ++-
 .../hadoop/ozone/om/TestS3BucketManager.java       |  2 +-
 ...TestOzoneManagerDoubleBufferWithOMResponse.java |  3 +-
 .../om/request/key/TestOMAllocateBlockRequest.java |  4 +-
 .../om/request/key/TestOMKeyCommitRequest.java     |  4 +-
 .../om/request/key/TestOMKeyCreateRequest.java     |  4 +-
 .../s3/bucket/TestS3BucketDeleteRequest.java       |  3 +-
 .../s3/bucket/TestS3BucketDeleteResponse.java      |  3 +-
 .../s3/endpoint/TestAbortMultipartUpload.java      |  7 ++-
 .../hadoop/ozone/s3/endpoint/TestBucketDelete.java | 11 ++--
 .../hadoop/ozone/s3/endpoint/TestBucketHead.java   |  5 +-
 .../s3/endpoint/TestInitiateMultipartUpload.java   |  9 +--
 .../hadoop/ozone/s3/endpoint/TestListParts.java    | 29 +++++----
 .../s3/endpoint/TestMultipartUploadComplete.java   | 33 ++++++-----
 .../s3/endpoint/TestMultipartUploadWithCopy.java   | 32 +++++-----
 .../hadoop/ozone/s3/endpoint/TestObjectPut.java    |  3 +-
 .../hadoop/ozone/s3/endpoint/TestPartUpload.java   | 24 +++++---
 .../hadoop/ozone/s3/endpoint/TestRootList.java     |  3 +-
 .../ozone/genesis/BenchMarkContainerStateMap.java  | 11 ++--
 46 files changed, 263 insertions(+), 200 deletions(-)

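Every hunk below applies the same one-line pattern: a hard-coded owner or metrics-context literal ("OZONE", "ozone", "hdfs", "RocksDB") is replaced with a shared constant, most often OzoneConsts.OZONE. The following is a minimal illustrative sketch of that pattern, not code from this patch; the class name OwnerConstantExample and the CONTAINER_OWNER field are hypothetical, and the constant's lower-case value is inferred from the metrics-context hunks (context = "ozone" becomes context = OzoneConsts.OZONE).

    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.ozone.OzoneConsts;

    // Hypothetical example, not part of this patch: instead of scattering the
    // literal "ozone"/"OZONE" through annotations and allocateContainer()
    // calls, the shared constant OzoneConsts.OZONE is referenced everywhere.
    @Metrics(about = "Example metrics source", context = OzoneConsts.OZONE)
    public final class OwnerConstantExample {

      // Owner string handed to SCM when tests allocate containers or blocks.
      public static final String CONTAINER_OWNER = OzoneConsts.OZONE;

      private OwnerConstantExample() {
      }
    }

One side effect worth noting: assuming the constant keeps the lower-case value seen in the metrics-context replacements, the test hunks that previously passed the upper-case literal "OZONE" as the container owner now pass a lower-cased owner string.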
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 97d0206..e000ae5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -48,6 +49,9 @@ public class TestContainerDataYaml {
   private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
   private static final Instant SCAN_TIME = Instant.now();
 
+  private static final String VOLUME_OWNER = "hdfs";
+  private static final String CONTAINER_DB_TYPE = "RocksDB";
+
   /**
    * Creates a .container file. cleanup() should be called at the end of the
    * test when container file is created.
@@ -60,7 +64,7 @@ public class TestContainerDataYaml {
     KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
         containerID, MAXSIZE, UUID.randomUUID().toString(),
         UUID.randomUUID().toString());
-    keyValueContainerData.setContainerDBType("RocksDB");
+    keyValueContainerData.setContainerDBType(CONTAINER_DB_TYPE);
     keyValueContainerData.setMetadataPath(testRoot);
     keyValueContainerData.setChunksPath(testRoot);
     keyValueContainerData.updateDataScanTime(SCAN_TIME);
@@ -93,7 +97,7 @@ public class TestContainerDataYaml {
     assertEquals(containerID, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
+    assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
     assertEquals(containerFile.getParent(), kvData.getMetadataPath());
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
     assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
@@ -108,8 +112,8 @@ public class TestContainerDataYaml {
         kvData.getDataScanTimestamp().longValue());
 
     // Update ContainerData.
-    kvData.addMetadata("VOLUME", "hdfs");
-    kvData.addMetadata("OWNER", "ozone");
+    kvData.addMetadata(OzoneConsts.VOLUME, VOLUME_OWNER);
+    kvData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE);
     kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
 
 
@@ -124,15 +128,16 @@ public class TestContainerDataYaml {
     assertEquals(containerID, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
+    assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
     assertEquals(containerFile.getParent(), kvData.getMetadataPath());
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
     assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
         .getState());
     assertEquals(1, kvData.getLayOutVersion());
     assertEquals(2, kvData.getMetadata().size());
-    assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
-    assertEquals("ozone", kvData.getMetadata().get("OWNER"));
+    assertEquals(VOLUME_OWNER, kvData.getMetadata().get(OzoneConsts.VOLUME));
+    assertEquals(OzoneConsts.OZONE,
+        kvData.getMetadata().get(OzoneConsts.OWNER));
     assertEquals(MAXSIZE, kvData.getMaxSize());
     assertTrue(kvData.lastDataScanTime().isPresent());
     assertEquals(SCAN_TIME, kvData.lastDataScanTime().get());
@@ -176,7 +181,7 @@ public class TestContainerDataYaml {
       //Checking the Container file data is consistent or not
       assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
           .getState());
-      assertEquals("RocksDB", kvData.getContainerDBType());
+      assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
       assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
           .getContainerType());
       assertEquals(9223372036854775807L, kvData.getContainerID());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
index 1d580a0..66cf790 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -91,8 +92,9 @@ public class TestBlockManagerImpl {
     // Creating BlockData
     blockID = new BlockID(1L, 1L);
     blockData = new BlockData(blockID);
-    blockData.addMetadata("VOLUME", "ozone");
-    blockData.addMetadata("OWNER", "hdfs");
+    blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
+    blockData.addMetadata(OzoneConsts.OWNER,
+        OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, 1024);
@@ -156,8 +158,9 @@ public class TestBlockManagerImpl {
     for (long i = 2; i <= 10; i++) {
       blockID = new BlockID(1L, i);
       blockData = new BlockData(blockID);
-      blockData.addMetadata("VOLUME", "ozone");
-      blockData.addMetadata("OWNER", "hdfs");
+      blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
+      blockData.addMetadata(OzoneConsts.OWNER,
+          OzoneConsts.OZONE_SIMPLE_HDFS_USER);
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, 1024);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 8597f22..f63de20 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
@@ -138,8 +139,9 @@ public class TestKeyValueContainer {
         // Creating BlockData
         BlockID blockID = new BlockID(containerId, i);
         BlockData blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
+        blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
+        blockData.addMetadata(OzoneConsts.OWNER,
+            OzoneConsts.OZONE_SIMPLE_HDFS_USER);
         List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
         ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
             .getLocalID(), 0), 0, 1024);
@@ -350,8 +352,8 @@ public class TestKeyValueContainer {
   public void testUpdateContainer() throws IOException {
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
     Map<String, String> metadata = new HashMap<>();
-    metadata.put("VOLUME", "ozone");
-    metadata.put("OWNER", "hdfs");
+    metadata.put(OzoneConsts.VOLUME, OzoneConsts.OZONE);
+    metadata.put(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     keyValueContainer.update(metadata, true);
 
     keyValueContainerData = keyValueContainer
@@ -376,7 +378,7 @@ public class TestKeyValueContainer {
       keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
       Map<String, String> metadata = new HashMap<>();
-      metadata.put("VOLUME", "ozone");
+      metadata.put(OzoneConsts.VOLUME, OzoneConsts.OZONE);
       keyValueContainer.update(metadata, false);
       fail("testUpdateContainerUnsupportedRequest failed");
     } catch (StorageContainerException ex) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
index e9a2579..41a8844 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
@@ -22,11 +22,12 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
  * Class contains metrics related to ContainerManager.
  */
-@Metrics(about = "SCM ContainerManager metrics", context = "ozone")
+@Metrics(about = "SCM ContainerManager metrics", context = OzoneConsts.OZONE)
 public final class SCMContainerManagerMetrics {
 
   private static final String SOURCE_NAME =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index fb709b1..1ca68bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -28,11 +28,12 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
  * This class is for maintaining Topology aware container placement statistics.
  */
-@Metrics(about="SCM Container Placement Metrics", context = "ozone")
+@Metrics(about="SCM Container Placement Metrics", context = OzoneConsts.OZONE)
 public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 1596523..676b2e9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -37,12 +37,13 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
  * This class maintains Node related metrics.
  */
 @InterfaceAudience.Private
-@Metrics(about = "SCM NodeManager Metrics", context = "ozone")
+@Metrics(about = "SCM NodeManager Metrics", context = OzoneConsts.OZONE)
 public final class SCMNodeMetrics implements MetricsSource {
 
   private static final String SOURCE_NAME =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
index d0f7f6e..b6a1445 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 import java.util.Map;
 import java.util.Optional;
@@ -38,7 +39,7 @@ import java.util.concurrent.ConcurrentHashMap;
  * This class maintains Pipeline related metrics.
  */
 @InterfaceAudience.Private
-@Metrics(about = "SCM PipelineManager Metrics", context = "ozone")
+@Metrics(about = "SCM PipelineManager Metrics", context = OzoneConsts.OZONE)
 public final class SCMPipelineMetrics implements MetricsSource {
 
   private static final String SOURCE_NAME =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
index 5e8e137..d9f3dbe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
@@ -33,12 +33,13 @@ import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 /**
  * Metrics source to report number of containers in different states.
  */
 @InterfaceAudience.Private
-@Metrics(about = "SCM Container Manager Metrics", context = "ozone")
+@Metrics(about = "SCM Container Manager Metrics", context = OzoneConsts.OZONE)
 public class SCMContainerMetrics implements MetricsSource {
 
   private final SCMMXBean scmmxBean;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index ae8aee9..a012d64 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -73,7 +74,6 @@ public class TestBlockManager {
   private final static long DEFAULT_BLOCK_SIZE = 128 * MB;
   private static HddsProtos.ReplicationFactor factor;
   private static HddsProtos.ReplicationType type;
-  private static String containerOwner = "OZONE";
   private static EventQueue eventQueue;
   private int numContainerPerOwnerInPipeline;
   private OzoneConfiguration conf;
@@ -137,7 +137,7 @@ public class TestBlockManager {
       return !blockManager.isScmInSafeMode();
     }, 10, 1000 * 5);
     AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList());
+        type, factor, OzoneConsts.OZONE, new ExcludeList());
     Assert.assertNotNull(block);
   }
 
@@ -157,7 +157,7 @@ public class TestBlockManager {
     excludeList
         .addPipeline(pipelineManager.getPipelines(type, factor).get(0).getId());
     AllocatedBlock block = blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
             excludeList);
     Assert.assertNotNull(block);
     Assert.assertNotEquals(block.getPipeline().getId(),
@@ -167,7 +167,7 @@ public class TestBlockManager {
       excludeList.addPipeline(pipeline.getId());
     }
     block = blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
             excludeList);
     Assert.assertNotNull(block);
     Assert.assertTrue(
@@ -193,7 +193,8 @@ public class TestBlockManager {
       CompletableFuture.supplyAsync(() -> {
         try {
           future.complete(blockManager
-              .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+              .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor,
+                  OzoneConsts.OZONE,
                   new ExcludeList()));
         } catch (IOException e) {
           future.completeExceptionally(e);
@@ -220,7 +221,7 @@ public class TestBlockManager {
     long size = 6 * GB;
     thrown.expectMessage("Unsupported block size");
     AllocatedBlock block = blockManager.allocateBlock(size,
-        type, factor, containerOwner, new ExcludeList());
+        type, factor, OzoneConsts.OZONE, new ExcludeList());
   }
 
 
@@ -235,7 +236,7 @@ public class TestBlockManager {
     thrown.expectMessage("SafeModePrecheck failed for "
         + "allocateBlock");
     blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList());
+        type, factor, OzoneConsts.OZONE, new ExcludeList());
   }
 
   @Test
@@ -246,7 +247,7 @@ public class TestBlockManager {
       return !blockManager.isScmInSafeMode();
     }, 10, 1000 * 5);
     Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner, new ExcludeList()));
+        type, factor, OzoneConsts.OZONE, new ExcludeList()));
   }
 
   @Test(timeout = 10000)
@@ -260,13 +261,13 @@ public class TestBlockManager {
     pipelineManager.createPipeline(type, factor);
 
     AllocatedBlock allocatedBlock = blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
             new ExcludeList());
     // block should be allocated in different pipelines
     GenericTestUtils.waitFor(() -> {
       try {
         AllocatedBlock block = blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
                 new ExcludeList());
         return !block.getPipeline().getId()
             .equals(allocatedBlock.getPipeline().getId());
@@ -311,7 +312,7 @@ public class TestBlockManager {
     GenericTestUtils.waitFor(() -> {
       try {
         blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
                 new ExcludeList());
       } catch (IOException e) {
       }
@@ -335,7 +336,7 @@ public class TestBlockManager {
     GenericTestUtils.waitFor(() -> {
       try {
         blockManager
-            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+            .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
                 new ExcludeList());
       } catch (IOException e) {
       }
@@ -356,7 +357,7 @@ public class TestBlockManager {
     }
     Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size());
     Assert.assertNotNull(blockManager
-        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner,
+        .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE,
             new ExcludeList()));
     Assert.assertEquals(1, pipelineManager.getPipelines(type, factor).size());
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index a8364a4..b022fd9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -118,7 +119,7 @@ public class TestCloseContainerEventHandler {
 
     ContainerInfo container = containerManager
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, "ozone");
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     ContainerID id = container.containerID();
     DatanodeDetails datanode = pipelineManager
         .getPipeline(container.getPipelineID()).getFirstNode();
@@ -138,7 +139,7 @@ public class TestCloseContainerEventHandler {
         .captureLogs(CloseContainerEventHandler.LOG);
     ContainerInfo container = containerManager
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, "ozone");
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     ContainerID id = container.containerID();
     int[] closeCount = new int[3];
     eventQueue.fireEvent(CLOSE_CONTAINER, id);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 342c240..6436af0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -69,7 +70,6 @@ public class TestSCMContainerManager {
   private static PipelineManager pipelineManager;
   private static File testDir;
   private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
   private static Random random;
   private static HddsProtos.ReplicationFactor replicationFactor;
   private static HddsProtos.ReplicationType replicationType;
@@ -124,7 +124,7 @@ public class TestSCMContainerManager {
   @Test
   public void testallocateContainer() throws Exception {
     ContainerInfo containerInfo = containerManager.allocateContainer(
-        replicationType, replicationFactor, containerOwner);
+        replicationType, replicationFactor, OzoneConsts.OZONE);
     Assert.assertNotNull(containerInfo);
   }
 
@@ -139,7 +139,7 @@ public class TestSCMContainerManager {
     Set<UUID> pipelineList = new TreeSet<>();
     for (int x = 0; x < 30; x++) {
       ContainerInfo containerInfo = containerManager.allocateContainer(
-          replicationType, replicationFactor, containerOwner);
+          replicationType, replicationFactor, OzoneConsts.OZONE);
 
       Assert.assertNotNull(containerInfo);
       Assert.assertNotNull(containerInfo.getPipelineID());
@@ -165,7 +165,7 @@ public class TestSCMContainerManager {
         try {
           ContainerInfo containerInfo = containerManager
               .allocateContainer(replicationType, replicationFactor,
-                  containerOwner);
+                  OzoneConsts.OZONE);
 
           Assert.assertNotNull(containerInfo);
           Assert.assertNotNull(containerInfo.getPipelineID());
@@ -190,7 +190,7 @@ public class TestSCMContainerManager {
   @Test
   public void testGetContainer() throws IOException {
     ContainerInfo containerInfo = containerManager.allocateContainer(
-        replicationType, replicationFactor, containerOwner);
+        replicationType, replicationFactor, OzoneConsts.OZONE);
     Assert.assertNotNull(containerInfo);
     Pipeline pipeline  = pipelineManager
         .getPipeline(containerInfo.getPipelineID());
@@ -203,7 +203,7 @@ public class TestSCMContainerManager {
   public void testGetContainerWithPipeline() throws Exception {
     ContainerInfo contInfo = containerManager
         .allocateContainer(replicationType, replicationFactor,
-            containerOwner);
+            OzoneConsts.OZONE);
     // Add dummy replicas for container.
     Iterator<DatanodeDetails> nodes = pipelineManager
         .getPipeline(contInfo.getPipelineID()).getNodes().iterator();
@@ -309,7 +309,8 @@ public class TestSCMContainerManager {
       throws IOException {
     nodeManager.setSafemode(false);
     return containerManager
-        .allocateContainer(replicationType, replicationFactor, containerOwner);
+        .allocateContainer(replicationType, replicationFactor,
+            OzoneConsts.OZONE);
   }
 
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 4122005..3e4508d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -164,7 +164,8 @@ public class TestContainerPlacement {
       ContainerInfo container = containerManager
           .allocateContainer(
               SCMTestUtils.getReplicationType(conf),
-              SCMTestUtils.getReplicationFactor(conf), "OZONE");
+              SCMTestUtils.getReplicationFactor(conf),
+              OzoneConsts.OZONE);
       assertEquals(SCMTestUtils.getReplicationFactor(conf).getNumber(),
           containerManager.getContainerReplicas(
               container.containerID()).size());
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
index 69c5791..5e06152 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.hdds.utils.HddsVersionInfo;
 import org.apache.hadoop.hdds.utils.VersionInfo;
@@ -36,7 +37,7 @@ public final class OzoneVersionInfo {
       LoggerFactory.getLogger(OzoneVersionInfo.class);
 
   public static final VersionInfo OZONE_VERSION_INFO =
-      new VersionInfo("ozone");
+      new VersionInfo(OzoneConsts.OZONE);
 
   private OzoneVersionInfo() {}
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 9353749..fe612a0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -64,7 +64,6 @@ public class TestContainerStateManagerIntegration {
   private StorageContainerManager scm;
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
-  private String containerOwner = "OZONE";
   private int numContainerPerOwnerInPipeline;
 
 
@@ -96,13 +95,13 @@ public class TestContainerStateManagerIntegration {
     // Allocate a container and verify the container info
     ContainerWithPipeline container1 = scm.getClientProtocolServer()
         .allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+        .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline());
     Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
         info.getContainerID());
-    Assert.assertEquals(containerOwner, info.getOwner());
+    Assert.assertEquals(OzoneConsts.OZONE, info.getOwner());
     Assert.assertEquals(SCMTestUtils.getReplicationType(conf),
         info.getReplicationType());
     Assert.assertEquals(SCMTestUtils.getReplicationFactor(conf),
@@ -113,9 +112,9 @@ public class TestContainerStateManagerIntegration {
     ContainerWithPipeline container2 = scm.getClientProtocolServer()
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     int numContainers = containerStateManager
-        .getMatchingContainerIDs(containerOwner,
+        .getMatchingContainerIDs(OzoneConsts.OZONE,
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
             HddsProtos.LifeCycleState.OPEN).size();
@@ -130,9 +129,9 @@ public class TestContainerStateManagerIntegration {
     // Allocate a container and verify the container info
     ContainerWithPipeline container1 = scm.getClientProtocolServer()
         .allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+        .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline());
     Assert.assertNotNull(info);
 
@@ -158,7 +157,7 @@ public class TestContainerStateManagerIntegration {
       ContainerWithPipeline container = scm.getClientProtocolServer()
           .allocateContainer(
               SCMTestUtils.getReplicationType(conf),
-              SCMTestUtils.getReplicationFactor(conf), containerOwner);
+              SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
       if (i >= 5) {
         scm.getContainerManager().updateContainerState(container
                 .getContainerInfo().containerID(),
@@ -173,7 +172,7 @@ public class TestContainerStateManagerIntegration {
 
     long matchCount = result.stream()
         .filter(info ->
-            info.getOwner().equals(containerOwner))
+            info.getOwner().equals(OzoneConsts.OZONE))
         .filter(info ->
             info.getReplicationType() == SCMTestUtils.getReplicationType(conf))
         .filter(info ->
@@ -185,7 +184,7 @@ public class TestContainerStateManagerIntegration {
     Assert.assertEquals(5, matchCount);
     matchCount = result.stream()
         .filter(info ->
-            info.getOwner().equals(containerOwner))
+            info.getOwner().equals(OzoneConsts.OZONE))
         .filter(info ->
             info.getReplicationType() == SCMTestUtils.getReplicationType(conf))
         .filter(info ->
@@ -202,7 +201,7 @@ public class TestContainerStateManagerIntegration {
     long cid;
     ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     cid = container1.getContainerInfo().getContainerID();
 
     // each getMatchingContainer call allocates a container in the
@@ -210,7 +209,7 @@ public class TestContainerStateManagerIntegration {
     // containers.
     for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
       ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+          .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
               container1.getPipeline());
       Assert.assertTrue(info.getContainerID() > cid);
       cid = info.getContainerID();
@@ -219,7 +218,7 @@ public class TestContainerStateManagerIntegration {
     // At this point there are already three containers in the pipeline.
     // next container should be the same as first container
     ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+        .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline());
     Assert.assertEquals(container1.getContainerInfo().getContainerID(),
         info.getContainerID());
@@ -230,7 +229,7 @@ public class TestContainerStateManagerIntegration {
     long cid;
     ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     cid = container1.getContainerInfo().getContainerID();
 
     // each getMatchingContainer call allocates a container in the
@@ -238,7 +237,7 @@ public class TestContainerStateManagerIntegration {
     // containers.
     for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
       ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+          .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
               container1.getPipeline());
       Assert.assertTrue(info.getContainerID() > cid);
       cid = info.getContainerID();
@@ -247,7 +246,7 @@ public class TestContainerStateManagerIntegration {
     // At this point there are already three containers in the pipeline.
     // next container should be the same as first container
     ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+        .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(), Collections.singletonList(new
                 ContainerID(1)));
     Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
@@ -260,19 +259,19 @@ public class TestContainerStateManagerIntegration {
     long cid;
     ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     cid = container1.getContainerInfo().getContainerID();
 
     for (int i = 1; i < numContainerPerOwnerInPipeline; i++) {
       ContainerInfo info = containerManager
-          .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+          .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
               container1.getPipeline());
       Assert.assertTrue(info.getContainerID() > cid);
       cid = info.getContainerID();
     }
 
     ContainerInfo info = containerManager
-        .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+        .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(), Arrays.asList(new ContainerID(1), new
                 ContainerID(2), new ContainerID(3)));
     Assert.assertEquals(info.getContainerID(), 4);
@@ -284,7 +283,7 @@ public class TestContainerStateManagerIntegration {
       throws IOException, InterruptedException {
     ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     Map<Long, Long> container2MatchedCount = new ConcurrentHashMap<>();
 
     // allocate blocks using multiple threads
@@ -292,7 +291,7 @@ public class TestContainerStateManagerIntegration {
     for (int i = 0; i < numBlockAllocates; i++) {
       CompletableFuture.supplyAsync(() -> {
         ContainerInfo info = containerManager
-            .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+            .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
                 container1.getPipeline());
         container2MatchedCount
             .compute(info.getContainerID(), (k, v) -> v == null ? 1L : v + 1);
@@ -323,7 +322,7 @@ public class TestContainerStateManagerIntegration {
   @Test
   public void testUpdateContainerState() throws IOException {
     NavigableSet<ContainerID> containerList = containerStateManager
-        .getMatchingContainerIDs(containerOwner,
+        .getMatchingContainerIDs(OzoneConsts.OZONE,
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
             HddsProtos.LifeCycleState.OPEN);
@@ -335,8 +334,9 @@ public class TestContainerStateManagerIntegration {
     ContainerWithPipeline container1 = scm.getClientProtocolServer()
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.OPEN).size();
@@ -345,7 +345,8 @@ public class TestContainerStateManagerIntegration {
     containerManager
         .updateContainerState(container1.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.FINALIZE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.CLOSING).size();
@@ -354,7 +355,8 @@ public class TestContainerStateManagerIntegration {
     containerManager
         .updateContainerState(container1.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.CLOSED).size();
@@ -363,7 +365,8 @@ public class TestContainerStateManagerIntegration {
     containerManager
         .updateContainerState(container1.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.DELETE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.DELETING).size();
@@ -372,7 +375,8 @@ public class TestContainerStateManagerIntegration {
     containerManager
         .updateContainerState(container1.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.CLEANUP);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.DELETED).size();
@@ -383,14 +387,15 @@ public class TestContainerStateManagerIntegration {
     ContainerWithPipeline container3 = scm.getClientProtocolServer()
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
     containerManager
         .updateContainerState(container3.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.FINALIZE);
     containerManager
         .updateContainerState(container3.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
-    containers = containerStateManager.getMatchingContainerIDs(containerOwner,
+    containers = containerStateManager.getMatchingContainerIDs(
+        OzoneConsts.OZONE,
         SCMTestUtils.getReplicationType(conf),
         SCMTestUtils.getReplicationFactor(conf),
         HddsProtos.LifeCycleState.CLOSED).size();
@@ -420,7 +425,7 @@ public class TestContainerStateManagerIntegration {
     ContainerWithPipeline container = scm.getClientProtocolServer()
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
 
     ContainerID id = container.getContainerInfo().containerID();
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index 5643cb6..f2c31d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -50,7 +51,6 @@ public class TestSCMContainerManagerMetrics {
 
   private MiniOzoneCluster cluster;
   private StorageContainerManager scm;
-  private String containerOwner = "OZONE";
 
   @Before
   public void setup() throws Exception {
@@ -77,7 +77,7 @@ public class TestSCMContainerManagerMetrics {
 
     ContainerInfo containerInfo = containerManager.allocateContainer(
         HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, containerOwner);
+        HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
@@ -86,7 +86,7 @@ public class TestSCMContainerManagerMetrics {
     try {
       containerManager.allocateContainer(
           HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE, containerOwner);
+          HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
       fail("testContainerOpsMetrics failed");
     } catch (IOException ex) {
       // Here it should fail, so it should have the old metric value.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index 30321ba..cd975cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -69,7 +69,7 @@ public class TestContainerOperations {
   public void testCreate() throws Exception {
     ContainerWithPipeline container = storageClient.createContainer(HddsProtos
         .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
-        .ONE, "OZONE");
+        .ONE, OzoneConsts.OZONE);
     assertEquals(container.getContainerInfo().getContainerID(), storageClient
         .getContainer(container.getContainerInfo().getContainerID())
         .getContainerID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
index 2d2d028..1175229 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
@@ -54,7 +54,6 @@ public class TestContainerStateMachineIdempotency {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -81,7 +80,7 @@ public class TestContainerStateMachineIdempotency {
   public void testContainerStateMachineIdempotency() throws Exception {
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     long containerID = container.getContainerInfo().getContainerID();
     Pipeline pipeline = container.getPipeline();
     XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 52cdb76..1c29da0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -182,7 +182,7 @@ public class TestStorageContainerManager {
       try {
         ContainerWithPipeline container2 = mockClientServer
             .allocateContainer(SCMTestUtils.getReplicationType(ozoneConf),
-            HddsProtos.ReplicationFactor.ONE,  "OZONE");
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
@@ -195,7 +195,7 @@ public class TestStorageContainerManager {
       try {
         ContainerWithPipeline container3 = mockClientServer
             .allocateContainer(SCMTestUtils.getReplicationType(ozoneConf),
-            HddsProtos.ReplicationFactor.ONE, "OZONE");
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index cf570d2..fd2cea3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -58,7 +59,6 @@ public class Test2WayCommitInRatis {
   private int blockSize;
   private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -123,7 +123,7 @@ public class Test2WayCommitInRatis {
 
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     XceiverClientSpi xceiverClient = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, xceiverClient.getRefcount());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
index ea51900..41ebb63 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
@@ -72,7 +72,6 @@ public class TestCommitWatcher {
   private static String keyString;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -132,7 +131,7 @@ public class TestCommitWatcher {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     Pipeline pipeline = container.getPipeline();
     long containerId = container.getContainerInfo().getContainerID();
     XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
@@ -208,7 +207,7 @@ public class TestCommitWatcher {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     Pipeline pipeline = container.getPipeline();
     long containerId = container.getContainerInfo().getContainerID();
     XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 9189c2f..5ef143c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -406,7 +406,7 @@ public abstract class TestOzoneRpcClientAbstract {
   public void testCreateS3BucketMapping()
       throws IOException, OzoneClientException {
     long currentTime = Time.now();
-    String userName = "ozone";
+    String userName = OzoneConsts.OZONE;
     String bucketName = UUID.randomUUID().toString();
     store.createS3Bucket(userName, bucketName);
     String volumeName = store.getOzoneVolumeName(bucketName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index 9b59349..a5d601e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolCli
 import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -73,7 +74,6 @@ public class TestWatchForCommit {
   private int blockSize;
   private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -279,7 +279,7 @@ public class TestWatchForCommit {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     XceiverClientSpi xceiverClient = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, xceiverClient.getRefcount());
@@ -321,7 +321,7 @@ public class TestWatchForCommit {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     XceiverClientSpi xceiverClient = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, xceiverClient.getRefcount());
@@ -369,7 +369,7 @@ public class TestWatchForCommit {
 
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     XceiverClientSpi xceiverClient = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, xceiverClient.getRefcount());
@@ -417,7 +417,7 @@ public class TestWatchForCommit {
 
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, containerOwner);
+            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
     XceiverClientSpi xceiverClient = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, xceiverClient.getRefcount());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
index c55de0b..8b7fb1f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
@@ -154,7 +154,7 @@ public class TestS3Shell {
 
     String s3Bucket = "bucket1";
     String commandOutput;
-    createS3Bucket("ozone", s3Bucket);
+    createS3Bucket(OzoneConsts.OZONE, s3Bucket);
 
     // WHEN
     String[] args =
@@ -200,7 +200,7 @@ public class TestS3Shell {
 
   private void createS3Bucket(String userName, String s3Bucket) {
     try {
-      client.createS3Bucket("ozone", s3Bucket);
+      client.createS3Bucket(OzoneConsts.OZONE, s3Bucket);
     } catch (IOException ex) {
       GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index d62e9be..8a68a3a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -41,7 +42,6 @@ public class TestAllocateContainer {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
@@ -69,7 +69,7 @@ public class TestAllocateContainer {
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
-            containerOwner);
+            OzoneConsts.OZONE);
     Assert.assertNotNull(container);
     Assert.assertNotNull(container.getPipeline().getFirstNode());
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index 91b0c15..48ce4a6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.junit.AfterClass;
@@ -54,7 +55,6 @@ public class TestContainerSmallFile {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -82,7 +82,7 @@ public class TestContainerSmallFile {
     ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(ozoneConfig),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     XceiverClientSpi client = xceiverClientManager
         .acquireClient(container.getPipeline());
     ContainerProtocolCalls.createContainer(client,
@@ -104,7 +104,7 @@ public class TestContainerSmallFile {
     ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(ozoneConfig),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     XceiverClientSpi client = xceiverClientManager
         .acquireClient(container.getPipeline());
     ContainerProtocolCalls.createContainer(client,
@@ -127,7 +127,7 @@ public class TestContainerSmallFile {
     ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(ozoneConfig),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     XceiverClientSpi client = xceiverClientManager
         .acquireClient(container.getPipeline());
     ContainerProtocolCalls.createContainer(client,
@@ -153,7 +153,7 @@ public class TestContainerSmallFile {
     ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     XceiverClientSpi client = xceiverClientManager
         .acquireClient(container.getPipeline());
     ContainerProtocolCalls.createContainer(client,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index ad7a4a3..db3e7bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.junit.AfterClass;
@@ -57,7 +58,6 @@ public class TestGetCommittedBlockLengthAndPutKey {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -85,7 +85,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
     ContainerProtos.GetCommittedBlockLengthResponseProto response;
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     long containerID = container.getContainerInfo().getContainerID();
     Pipeline pipeline = container.getPipeline();
     XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
@@ -118,7 +118,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
   public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     long containerID = container.getContainerInfo().getContainerID();
     XceiverClientSpi client = xceiverClientManager
         .acquireClient(container.getPipeline());
@@ -143,7 +143,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
     ContainerProtos.PutBlockResponseProto response;
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
     long containerID = container.getContainerInfo().getContainerID();
     Pipeline pipeline = container.getPipeline();
     XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 73b9704..b648633 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -50,7 +51,6 @@ public class TestXceiverClientManager {
   private static MiniOzoneCluster cluster;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -84,9 +84,10 @@ public class TestXceiverClientManager {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
     ContainerWithPipeline container1 = storageContainerLocationClient
-        .allocateContainer(SCMTestUtils.getReplicationType(conf),
+        .allocateContainer(
+            SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
-            containerOwner);
+            OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, client1.getRefcount());
@@ -95,7 +96,7 @@ public class TestXceiverClientManager {
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
-            containerOwner);
+            OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
     Assert.assertEquals(1, client2.getRefcount());
@@ -127,7 +128,7 @@ public class TestXceiverClientManager {
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
             HddsProtos.ReplicationFactor.ONE,
-            containerOwner);
+            OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, client1.getRefcount());
@@ -137,7 +138,8 @@ public class TestXceiverClientManager {
     ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            HddsProtos.ReplicationFactor.ONE, containerOwner);
+            HddsProtos.ReplicationFactor.ONE,
+            OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
     Assert.assertEquals(1, client2.getRefcount());
@@ -185,7 +187,8 @@ public class TestXceiverClientManager {
     ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf),
+            OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
     Assert.assertEquals(1, client1.getRefcount());
@@ -196,7 +199,8 @@ public class TestXceiverClientManager {
     ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+            SCMTestUtils.getReplicationFactor(conf),
+            OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
     Assert.assertEquals(1, client2.getRefcount());
@@ -237,7 +241,7 @@ public class TestXceiverClientManager {
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
-            containerOwner);
+            OzoneConsts.OZONE);
     XceiverClientSpi client1 =
         clientManager.acquireClient(container1.getPipeline());
     clientManager.acquireClient(container1.getPipeline());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index 8be0f87..d5ff39e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
@@ -61,7 +62,6 @@ public class TestXceiverClientMetrics {
   private static MiniOzoneCluster cluster;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static String containerOwner = "OZONE";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -87,8 +87,10 @@ public class TestXceiverClientMetrics {
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
     ContainerWithPipeline container = storageContainerLocationClient
-        .allocateContainer(SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf), containerOwner);
+        .allocateContainer(
+            SCMTestUtils.getReplicationType(conf),
+            SCMTestUtils.getReplicationFactor(conf),
+            OzoneConsts.OZONE);
     XceiverClientSpi client = clientManager
         .acquireClient(container.getPipeline());
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java
index ef35d4d..0513876 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java
@@ -64,7 +64,7 @@ public class TestS3BucketManager {
   public void testOzoneVolumeNameForUser() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
-    String userName = "ozone";
+    String userName = OzoneConsts.OZONE;
     String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName);
     assertEquals(OzoneConsts.OM_S3_VOLUME_PREFIX + userName, volumeName);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index 441f1c1..0cc8e40 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -25,6 +25,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -441,7 +442,7 @@ public class TestOzoneManagerDoubleBufferWithOMResponse {
   private OMClientResponse createVolume(String volumeName,
       long transactionId) {
 
-    String admin = "ozone";
+    String admin = OzoneConsts.OZONE;
     String owner = UUID.randomUUID().toString();
     OzoneManagerProtocolProtos.OMRequest omRequest =
         TestOMRequestUtils.createVolumeRequest(volumeName, admin, owner);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index be3e4a7..5e4a4c4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.ozone.om.request.key;
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -146,7 +147,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
 
 
     // Added only volume to DB.
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
+    TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
+        omMetadataManager);
 
     OMClientResponse omAllocateBlockResponse =
         omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index 9bfac6c..cdbae54 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -154,7 +155,8 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         new OMKeyCommitRequest(modifiedOmRequest);
 
 
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
+    TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
+        omMetadataManager);
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 340cc04..020e8dd 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.key;
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -210,7 +211,8 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
         keyName, id);
 
-    TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
+    TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
+        omMetadataManager);
 
     // Before calling
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java
index f542268..39d06d4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.om.request.s3.bucket;
 import java.util.UUID;
 
 import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -52,7 +53,7 @@ public class TestS3BucketDeleteRequest extends TestS3BucketRequest {
     OMRequest omRequest = doPreExecute(s3BucketName);
 
     // Add s3Bucket to s3Bucket table.
-    TestOMRequestUtils.addS3BucketToDB("ozone", s3BucketName,
+    TestOMRequestUtils.addS3BucketToDB(OzoneConsts.OZONE, s3BucketName,
         omMetadataManager);
 
     S3BucketDeleteRequest s3BucketDeleteRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java
index 865f4c6..b355363 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.s3.bucket;
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -63,7 +64,7 @@ public class TestS3BucketDeleteResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
     String s3BucketName = UUID.randomUUID().toString();
-    String userName = "ozone";
+    String userName = OzoneConsts.OZONE;
     String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName);
     S3BucketCreateResponse s3BucketCreateResponse =
         TestOMResponseUtils.createS3BucketResponse(userName, volumeName,
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
index 912a769..5a86514 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
@@ -43,10 +44,10 @@ public class TestAbortMultipartUpload {
   @Test
   public void testAbortMultipartUpload() throws Exception {
 
-    String bucket = "s3bucket";
-    String key = "key1";
+    String bucket = OzoneConsts.S3_BUCKET;
+    String key = OzoneConsts.KEY;
     OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", bucket);
+    client.getObjectStore().createS3Bucket(OzoneConsts.OZONE, bucket);
 
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
     when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
index ea574d4..580a465 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.ws.rs.core.Response;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.ObjectStoreStub;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
@@ -39,7 +40,7 @@ import org.junit.Test;
  */
 public class TestBucketDelete {
 
-  private String bucketName = "myBucket";
+  private String bucketName = OzoneConsts.BUCKET;
   private OzoneClientStub clientStub;
   private ObjectStore objectStoreStub;
   private BucketEndpoint bucketEndpoint;
@@ -51,7 +52,7 @@ public class TestBucketDelete {
     clientStub = new OzoneClientStub();
     objectStoreStub = clientStub.getObjectStore();
 
-    objectStoreStub.createS3Bucket("ozone", bucketName);
+    objectStoreStub.createS3Bucket(OzoneConsts.OZONE, bucketName);
 
     // Create HeadBucket and setClient to OzoneClientStub
     bucketEndpoint = new BucketEndpoint();
@@ -84,11 +85,9 @@ public class TestBucketDelete {
   @Test
   public void testDeleteWithBucketNotEmpty() throws Exception {
     try {
-      String bucket = "nonemptybucket";
-      objectStoreStub.createS3Bucket("ozone1", bucket);
       ObjectStoreStub stub = (ObjectStoreStub) objectStoreStub;
-      stub.setBucketEmptyStatus(bucket, false);
-      bucketEndpoint.delete(bucket);
+      stub.setBucketEmptyStatus(bucketName, false);
+      bucketEndpoint.delete(bucketName);
     } catch (OS3Exception ex) {
       assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getCode(), ex.getCode());
       assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getErrorMessage(),
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
index f06da70..d9360ba 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.ws.rs.core.Response;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 
@@ -35,8 +36,8 @@ import org.junit.Test;
  */
 public class TestBucketHead {
 
-  private String bucketName = "myBucket";
-  private String userName = "ozone";
+  private String bucketName = OzoneConsts.BUCKET;
+  private String userName = OzoneConsts.OZONE;
   private OzoneClientStub clientStub;
   private ObjectStore objectStoreStub;
   private BucketEndpoint bucketEndpoint;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
index 212721a..f29e717 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -43,13 +44,13 @@ public class TestInitiateMultipartUpload {
   @Test
   public void testInitiateMultipartUpload() throws Exception {
 
-    String bucket = "s3bucket";
-    String key = "key1";
+    String bucket = OzoneConsts.S3_BUCKET;
+    String key = OzoneConsts.KEY;
     OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", bucket);
+    client.getObjectStore().createS3Bucket(OzoneConsts.OZONE, bucket);
     String volumeName = client.getObjectStore().getOzoneVolumeName(bucket);
     OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket ozoneBucket = volume.getBucket("s3bucket");
+    OzoneBucket ozoneBucket = volume.getBucket(OzoneConsts.S3_BUCKET);
 
 
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
index 21545ec..44cce58 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
@@ -43,15 +44,14 @@ public class TestListParts {
 
 
   private final static ObjectEndpoint REST = new ObjectEndpoint();
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key1";
   private static String uploadID;
 
   @BeforeClass
   public static void setUp() throws Exception {
 
     OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", BUCKET);
+    client.getObjectStore().createS3Bucket(OzoneConsts.OZONE,
+        OzoneConsts.S3_BUCKET);
 
 
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
@@ -61,7 +61,8 @@ public class TestListParts {
     REST.setHeaders(headers);
     REST.setClient(client);
 
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
+    Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
+        OzoneConsts.KEY);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
@@ -71,22 +72,26 @@ public class TestListParts {
 
     String content = "Multipart Upload";
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 1, uploadID, body);
 
     assertNotNull(response.getHeaderString("ETag"));
 
-    response = REST.put(BUCKET, KEY, content.length(), 2, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 2, uploadID, body);
 
     assertNotNull(response.getHeaderString("ETag"));
 
-    response = REST.put(BUCKET, KEY, content.length(), 3, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 3, uploadID, body);
 
     assertNotNull(response.getHeaderString("ETag"));
   }
 
   @Test
   public void testListParts() throws Exception {
-    Response response = REST.get(BUCKET, KEY, uploadID, 3, "0", null);
+    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        uploadID, 3, "0", null);
 
     ListPartsResponse listPartsResponse =
         (ListPartsResponse) response.getEntity();
@@ -98,7 +103,8 @@ public class TestListParts {
 
   @Test
   public void testListPartsContinuation() throws Exception {
-    Response response = REST.get(BUCKET, KEY, uploadID, 2, "0", null);
+    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        uploadID, 2, "0", null);
     ListPartsResponse listPartsResponse =
         (ListPartsResponse) response.getEntity();
 
@@ -106,7 +112,7 @@ public class TestListParts {
     Assert.assertTrue(listPartsResponse.getPartList().size() == 2);
 
     // Continue
-    response = REST.get(BUCKET, KEY, uploadID, 2,
+    response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, uploadID, 2,
         Integer.toString(listPartsResponse.getNextPartNumberMarker()), null);
     listPartsResponse = (ListPartsResponse) response.getEntity();
 
@@ -118,7 +124,8 @@ public class TestListParts {
   @Test
   public void testListPartsWithUnknownUploadID() throws Exception {
     try {
-      Response response = REST.get(BUCKET, KEY, uploadID, 2, "0", null);
+      Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+          uploadID, 2, "0", null);
     } catch (OS3Exception ex) {
       Assert.assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
           ex.getErrorMessage());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index b9e3885..c021edd 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
@@ -50,14 +51,13 @@ import static org.mockito.Mockito.when;
 public class TestMultipartUploadComplete {
 
   private final static ObjectEndpoint REST = new ObjectEndpoint();;
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key1";
   private final static OzoneClientStub CLIENT = new OzoneClientStub();
 
   @BeforeClass
   public static void setUp() throws Exception {
 
-    CLIENT.getObjectStore().createS3Bucket("ozone", BUCKET);
+    CLIENT.getObjectStore().createS3Bucket(OzoneConsts.OZONE,
+        OzoneConsts.S3_BUCKET);
 
 
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
@@ -70,7 +70,8 @@ public class TestMultipartUploadComplete {
 
   private String initiateMultipartUpload(String key) throws IOException,
       OS3Exception {
-    Response response = REST.initializeMultipartUpload(BUCKET, key);
+    Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
+        key);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
@@ -85,8 +86,8 @@ public class TestMultipartUploadComplete {
   private Part uploadPart(String key, String uploadID, int partNumber, String
       content) throws IOException, OS3Exception {
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    Response response = REST.put(BUCKET, key, content.length(), partNumber,
-        uploadID, body);
+    Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(),
+        partNumber, uploadID, body);
     assertEquals(response.getStatus(), 200);
     assertNotNull(response.getHeaderString("ETag"));
     Part part = new Part();
@@ -99,17 +100,19 @@ public class TestMultipartUploadComplete {
   private void completeMultipartUpload(String key,
       CompleteMultipartUploadRequest completeMultipartUploadRequest,
       String uploadID) throws IOException, OS3Exception {
-    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
-        completeMultipartUploadRequest);
+    Response response = REST.completeMultipartUpload(OzoneConsts.S3_BUCKET, key,
+        uploadID, completeMultipartUploadRequest);
 
     assertEquals(response.getStatus(), 200);
 
     CompleteMultipartUploadResponse completeMultipartUploadResponse =
         (CompleteMultipartUploadResponse) response.getEntity();
 
-    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
-    assertEquals(completeMultipartUploadResponse.getKey(), KEY);
-    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
+    assertEquals(completeMultipartUploadResponse.getBucket(),
+        OzoneConsts.S3_BUCKET);
+    assertEquals(completeMultipartUploadResponse.getKey(), OzoneConsts.KEY);
+    assertEquals(completeMultipartUploadResponse.getLocation(),
+        OzoneConsts.S3_BUCKET);
     assertNotNull(completeMultipartUploadResponse.getETag());
   }
 
@@ -117,7 +120,7 @@ public class TestMultipartUploadComplete {
   public void testMultipart() throws Exception {
 
     // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(KEY);
+    String uploadID = initiateMultipartUpload(OzoneConsts.KEY);
 
     List<Part> partsList = new ArrayList<>();
 
@@ -126,12 +129,12 @@ public class TestMultipartUploadComplete {
     String content = "Multipart Upload 1";
     int partNumber = 1;
 
-    Part part1 = uploadPart(KEY, uploadID, partNumber, content);
+    Part part1 = uploadPart(OzoneConsts.KEY, uploadID, partNumber, content);
     partsList.add(part1);
 
     content = "Multipart Upload 2";
     partNumber = 2;
-    Part part2 = uploadPart(KEY, uploadID, partNumber, content);
+    Part part2 = uploadPart(OzoneConsts.KEY, uploadID, partNumber, content);
     partsList.add(part2);
 
     // complete multipart upload
@@ -140,7 +143,7 @@ public class TestMultipartUploadComplete {
     completeMultipartUploadRequest.setPartList(partsList);
 
 
-    completeMultipartUpload(KEY, completeMultipartUploadRequest,
+    completeMultipartUpload(OzoneConsts.KEY, completeMultipartUploadRequest,
         uploadID);
 
   }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
index 425bfc4..f688ff9 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
@@ -34,6 +34,7 @@ import java.util.Scanner;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
@@ -59,7 +60,6 @@ public class TestMultipartUploadWithCopy {
 
   private final static ObjectEndpoint REST = new ObjectEndpoint();
 
-  private final static String BUCKET = "s3bucket";
   private final static String KEY = "key2";
   private final static String EXISTING_KEY = "key1";
   private static final String EXISTING_KEY_CONTENT = "testkey";
@@ -71,9 +71,9 @@ public class TestMultipartUploadWithCopy {
   public static void setUp() throws Exception {
 
     ObjectStore objectStore = CLIENT.getObjectStore();
-    objectStore.createS3Bucket("ozone", BUCKET);
+    objectStore.createS3Bucket(OzoneConsts.OZONE, OzoneConsts.S3_BUCKET);
 
-    OzoneBucket bucket = getOzoneBucket(objectStore, BUCKET);
+    OzoneBucket bucket = getOzoneBucket(objectStore, OzoneConsts.S3_BUCKET);
 
     byte[] keyContent = EXISTING_KEY_CONTENT.getBytes();
     try (OutputStream stream = bucket
@@ -108,13 +108,13 @@ public class TestMultipartUploadWithCopy {
     partNumber = 2;
     Part part2 =
         uploadPartWithCopy(KEY, uploadID, partNumber,
-            BUCKET + "/" + EXISTING_KEY, null);
+            OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY, null);
     partsList.add(part2);
 
     partNumber = 3;
     Part part3 =
         uploadPartWithCopy(KEY, uploadID, partNumber,
-            BUCKET + "/" + EXISTING_KEY,
+            OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY,
             "bytes=" + RANGE_FROM + "-" + RANGE_TO);
     partsList.add(part3);
 
@@ -126,7 +126,8 @@ public class TestMultipartUploadWithCopy {
     completeMultipartUpload(KEY, completeMultipartUploadRequest,
         uploadID);
 
-    OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(), BUCKET);
+    OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(),
+        OzoneConsts.S3_BUCKET);
     try (InputStream is = bucket.readKey(KEY)) {
       String keyContent = new Scanner(is).useDelimiter("\\A").next();
       Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT
@@ -137,7 +138,8 @@ public class TestMultipartUploadWithCopy {
   private String initiateMultipartUpload(String key) throws IOException,
       OS3Exception {
     setHeaders();
-    Response response = REST.initializeMultipartUpload(BUCKET, key);
+    Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
+        key);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
@@ -153,8 +155,8 @@ public class TestMultipartUploadWithCopy {
       content) throws IOException, OS3Exception {
     setHeaders();
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    Response response = REST.put(BUCKET, key, content.length(), partNumber,
-        uploadID, body);
+    Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(),
+        partNumber, uploadID, body);
     assertEquals(response.getStatus(), 200);
     assertNotNull(response.getHeaderString("ETag"));
     Part part = new Part();
@@ -175,7 +177,7 @@ public class TestMultipartUploadWithCopy {
     setHeaders(additionalHeaders);
 
     ByteArrayInputStream body = new ByteArrayInputStream("".getBytes());
-    Response response = REST.put(BUCKET, key, 0, partNumber,
+    Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber,
         uploadID, body);
     assertEquals(response.getStatus(), 200);
 
@@ -193,17 +195,19 @@ public class TestMultipartUploadWithCopy {
       CompleteMultipartUploadRequest completeMultipartUploadRequest,
       String uploadID) throws IOException, OS3Exception {
     setHeaders();
-    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
-        completeMultipartUploadRequest);
+    Response response = REST.completeMultipartUpload(OzoneConsts.S3_BUCKET, key,
+        uploadID, completeMultipartUploadRequest);
 
     assertEquals(response.getStatus(), 200);
 
     CompleteMultipartUploadResponse completeMultipartUploadResponse =
         (CompleteMultipartUploadResponse) response.getEntity();
 
-    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
+    assertEquals(completeMultipartUploadResponse.getBucket(),
+        OzoneConsts.S3_BUCKET);
     assertEquals(completeMultipartUploadResponse.getKey(), KEY);
-    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
+    assertEquals(completeMultipartUploadResponse.getLocation(),
+        OzoneConsts.S3_BUCKET);
     assertNotNull(completeMultipartUploadResponse.getETag());
   }
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
index 839834c..775a18b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.nio.charset.Charset;
 
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
@@ -51,7 +52,7 @@ import static org.mockito.Mockito.when;
  */
 public class TestObjectPut {
   public static final String CONTENT = "0123456789";
-  private String userName = "ozone";
+  private String userName = OzoneConsts.OZONE;
   private String bucketName = "b1";
   private String keyName = "key1";
   private String destBucket = "b2";
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
index 3e91a77..08db655 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.junit.BeforeClass;
@@ -45,14 +46,13 @@ import static org.mockito.Mockito.when;
 public class TestPartUpload {
 
   private final static ObjectEndpoint REST = new ObjectEndpoint();
-  private final static String BUCKET = "s3bucket";
-  private final static String KEY = "key1";
 
   @BeforeClass
   public static void setUp() throws Exception {
 
     OzoneClientStub client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("ozone", BUCKET);
+    client.getObjectStore().createS3Bucket(OzoneConsts.OZONE,
+        OzoneConsts.S3_BUCKET);
 
 
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
@@ -67,7 +67,8 @@ public class TestPartUpload {
   @Test
   public void testPartUpload() throws Exception {
 
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
+    Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
+        OzoneConsts.KEY);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
@@ -77,7 +78,8 @@ public class TestPartUpload {
 
     String content = "Multipart Upload";
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 1, uploadID, body);
 
     assertNotNull(response.getHeaderString("ETag"));
 
@@ -86,7 +88,8 @@ public class TestPartUpload {
   @Test
   public void testPartUploadWithOverride() throws Exception {
 
-    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
+    Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET,
+        OzoneConsts.KEY);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
@@ -96,7 +99,8 @@ public class TestPartUpload {
 
     String content = "Multipart Upload";
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 1, uploadID, body);
 
     assertNotNull(response.getHeaderString("ETag"));
 
@@ -104,7 +108,8 @@ public class TestPartUpload {
 
     // Upload part again with same part Number, the ETag should be changed.
     content = "Multipart Upload Changed";
-    response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body);
+    response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+        content.length(), 1, uploadID, body);
     assertNotNull(response.getHeaderString("ETag"));
     assertNotEquals(eTag, response.getHeaderString("ETag"));
 
@@ -116,7 +121,8 @@ public class TestPartUpload {
     try {
       String content = "Multipart Upload With Incorrect uploadID";
       ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
-      REST.put(BUCKET, KEY, content.length(), 1, "random", body);
+      REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1,
+          "random", body);
       fail("testPartUploadWithIncorrectUploadID failed");
     } catch (OS3Exception ex) {
       assertEquals("NoSuchUpload", ex.getCode());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
index b7512cb..04e3e95 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.ozone.s3.endpoint;
 
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser;
@@ -37,7 +38,7 @@ public class TestRootList {
   private OzoneClientStub clientStub;
   private ObjectStore objectStoreStub;
   private RootEndpoint rootEndpoint;
-  private String userName = "ozone";
+  private String userName = OzoneConsts.OZONE;
 
   @Before
   public void setup() throws Exception {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 9c0b541..66a1c3c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
@@ -73,7 +74,7 @@ public class BenchMarkContainerStateMap {
             .setUsedBytes(0)
             .setNumberOfKeys(0)
             .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
+            .setOwner(OzoneConsts.OZONE)
             .setContainerID(x)
             .setDeleteTransactionId(0)
             .build();
@@ -93,7 +94,7 @@ public class BenchMarkContainerStateMap {
             .setUsedBytes(0)
             .setNumberOfKeys(0)
             .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
+            .setOwner(OzoneConsts.OZONE)
             .setContainerID(y)
             .setDeleteTransactionId(0)
             .build();
@@ -112,7 +113,7 @@ public class BenchMarkContainerStateMap {
           .setUsedBytes(0)
           .setNumberOfKeys(0)
           .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
+          .setOwner(OzoneConsts.OZONE)
           .setContainerID(currentCount++)
           .setDeleteTransactionId(0)
           .build();
@@ -181,7 +182,7 @@ public class BenchMarkContainerStateMap {
         .setUsedBytes(0)
         .setNumberOfKeys(0)
         .setStateEnterTime(Time.monotonicNow())
-        .setOwner("OZONE")
+        .setOwner(OzoneConsts.OZONE)
         .setContainerID(cid)
         .setDeleteTransactionId(0)
         .build();
@@ -194,7 +195,7 @@ public class BenchMarkContainerStateMap {
       state.stateMap.addContainer(getContainerInfo(state));
     }
     bh.consume(state.stateMap
-        .getMatchingContainerIDs(OPEN, "OZONE", ReplicationFactor.ONE,
+        .getMatchingContainerIDs(OPEN, OzoneConsts.OZONE, ReplicationFactor.ONE,
             ReplicationType.STAND_ALONE));
   }
 }

