This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new d55524c78b HDDS-9117. Source bucket replication type is ignored for links (#5144)
d55524c78b is described below

commit d55524c78bcd18fc5cea074adbc36a5f4cb59bd0
Author: Cyrill <[email protected]>
AuthorDate: Tue Oct 3 10:06:29 2023 +0300

    HDDS-9117. Source bucket replication type is ignored for links (#5144)
---
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  42 +++++
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  35 ++---
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  74 +++++++--
 .../apache/hadoop/ozone/om/OzoneManagerUtils.java  | 169 +++++++++------------
 .../org/apache/hadoop/ozone/om/ResolvedBucket.java |   5 +
 .../hadoop/ozone/om/TestBucketManagerImpl.java     |  89 +++++++++++
 .../apache/hadoop/ozone/debug/PrefixParser.java    |  18 +--
 7 files changed, 294 insertions(+), 138 deletions(-)
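
For context, the client-visible effect of this change: getting the info of a link bucket now reflects the source bucket's properties (default replication config, bucket layout, quotas, metadata) rather than the link's own defaults, and dangling links remain readable and deletable. A minimal client-side sketch follows, assuming an initialized ObjectStore named store (as in TestOzoneRpcClientAbstract); BucketArgs.Builder#setDefaultReplicationConfig and OzoneBucket#getReplicationConfig are assumed to be available in the client API and are not taken from this patch.

// Illustrative sketch (not from this patch). Assumes imports from
// org.apache.hadoop.ozone.client (ObjectStore, OzoneVolume, OzoneBucket,
// BucketArgs) and org.apache.hadoop.hdds.client (DefaultReplicationConfig,
// ECReplicationConfig), plus java.util.UUID.
void readThroughLink(ObjectStore store) throws IOException {
  String volumeName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);

  // Source bucket with an explicit EC default replication config.
  volume.createBucket("source-bucket",
      BucketArgs.newBuilder()
          .setDefaultReplicationConfig(
              new DefaultReplicationConfig(new ECReplicationConfig(3, 2)))
          .build());

  // Link bucket pointing at the source bucket.
  volume.createBucket("link-bucket",
      BucketArgs.newBuilder()
          .setSourceVolume(volumeName)
          .setSourceBucket("source-bucket")
          .build());

  // Before HDDS-9117 the link reported its own (default) replication type;
  // with this change it mirrors the source bucket's EC config.
  OzoneBucket link = volume.getBucket("link-bucket");
  System.out.println(link.getReplicationConfig());
}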

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 1a28183f40..c528cb5d4c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -898,6 +898,48 @@ public abstract class TestOzoneRpcClientAbstract {
     );
   }
 
+  @Test
+  public void testDeleteLinkedBucket()
+      throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String linkedBucketName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    assertNotNull(bucket);
+
+    volume.createBucket(linkedBucketName,
+        BucketArgs.newBuilder()
+            .setSourceBucket(bucketName)
+            .setSourceVolume(volumeName)
+            .build());
+    OzoneBucket linkedBucket = volume.getBucket(linkedBucketName);
+    assertNotNull(linkedBucket);
+
+    volume.deleteBucket(bucketName);
+
+    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
+        () -> volume.getBucket(bucketName)
+    );
+    //now linkedBucketName has become a dangling link,
+    //but it should still be possible to get its info
+    OzoneBucket danglingLinkedBucket = volume.getBucket(linkedBucketName);
+    assertNotNull(danglingLinkedBucket);
+
+    //now delete the dangling linked bucket
+    volume.deleteBucket(linkedBucketName);
+
+    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
+        () -> volume.getBucket(linkedBucketName)
+    );
+
+    store.deleteVolume(volumeName);
+  }
+
   private void verifyReplication(String volumeName, String bucketName,
       String keyName, ReplicationConfig replication)
       throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 815eee2924..5bc894b2b9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.ozone.om;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
 
@@ -36,7 +35,6 @@ import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
@@ -61,6 +59,16 @@ public class BucketManagerImpl implements BucketManager {
     this.metadataManager = metadataManager;
   }
 
+  /**
+   * Retrieve bucket info.
+   * This method does not follow the bucket link and returns only
+   * this bucket's own properties.
+   *
+   * @param volumeName - Name of the Volume.
+   * @param bucketName - Name of the Bucket.
+   * @return Bucket Information.
+   * @throws IOException
+   */
   @Override
   public OmBucketInfo getBucketInfo(String volumeName, String bucketName)
       throws IOException {
@@ -69,27 +77,8 @@ public class BucketManagerImpl implements BucketManager {
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
         bucketName);
     try {
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo value = metadataManager.getBucketTable().get(bucketKey);
-      if (value == null) {
-        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
-            volumeName);
-        // Check parent volume existence
-        final String dbVolumeKey = metadataManager.getVolumeKey(volumeName);
-        if (metadataManager.getVolumeTable().get(dbVolumeKey) == null) {
-          // Parent volume doesn't exist, throw VOLUME_NOT_FOUND
-          throw new OMException("Volume not found when getting bucket info",
-              VOLUME_NOT_FOUND);
-        } else {
-          // Parent volume exists, throw BUCKET_NOT_FOUND
-          throw new OMException("Bucket not found", BUCKET_NOT_FOUND);
-        }
-      }
-
-      value = OzoneManagerUtils.resolveLinkBucketLayout(value, metadataManager,
-          new HashSet<>());
-
-      return value;
+      return OzoneManagerUtils.getBucketInfo(metadataManager,
+          volumeName, bucketName);
     } catch (IOException ex) {
       if (!(ex instanceof OMException)) {
         LOG.error("Exception while getting bucket info for bucket: {}",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 31e3abebb6..a9a13a49a2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -2790,7 +2790,45 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
             bucket, null);
       }
       metrics.incNumBucketInfos();
-      return bucketManager.getBucketInfo(volume, bucket);
+
+      OmBucketInfo bucketInfo = bucketManager.getBucketInfo(volume, bucket);
+
+      // No links - return the bucket info right away.
+      if (!bucketInfo.isLink()) {
+        return bucketInfo;
+      }
+      // Otherwise follow the links to find the real bucket.
+      // We already know that `bucketInfo` is a linked one,
+      // so we skip one `getBucketInfo` and start with the known link.
+      ResolvedBucket resolvedBucket =
+          resolveBucketLink(Pair.of(
+                  bucketInfo.getSourceVolume(),
+                  bucketInfo.getSourceBucket()),
+              true);
+
+      // If it is a dangling link, no real bucket exists behind it;
+      // for example, the source was deleted but the link is still present.
+      if (!resolvedBucket.isDangling()) {
+        OmBucketInfo realBucket =
+            bucketManager.getBucketInfo(
+                resolvedBucket.realVolume(),
+                resolvedBucket.realBucket());
+        // Pass the real bucket metadata in the link bucket info.
+        return bucketInfo.toBuilder()
+            .setDefaultReplicationConfig(
+                realBucket.getDefaultReplicationConfig())
+            .setIsVersionEnabled(realBucket.getIsVersionEnabled())
+            .setStorageType(realBucket.getStorageType())
+            .setQuotaInBytes(realBucket.getQuotaInBytes())
+            .setQuotaInNamespace(realBucket.getQuotaInNamespace())
+            .setUsedBytes(realBucket.getUsedBytes())
+            .setUsedNamespace(realBucket.getUsedNamespace())
+            .addAllMetadata(realBucket.getMetadata())
+            .setBucketLayout(realBucket.getBucketLayout())
+            .build();
+      }
+      // If no real bucket exists, return the requested one's info.
+      return bucketInfo;
     } catch (Exception ex) {
       metrics.incNumBucketInfoFails();
       auditSuccess = false;
@@ -4140,6 +4178,11 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         Pair.of(args.getVolumeName(), args.getBucketName()), omClientRequest);
   }
 
+  public ResolvedBucket resolveBucketLink(Pair<String, String> requested)
+      throws IOException {
+    return resolveBucketLink(requested, false);
+  }
+
   public ResolvedBucket resolveBucketLink(OmKeyArgs args)
       throws IOException {
     return resolveBucketLink(
@@ -4154,15 +4197,17 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       resolved = resolveBucketLink(requested, new HashSet<>(),
               omClientRequest.createUGIForApi(),
               omClientRequest.getRemoteAddress(),
-              omClientRequest.getHostName());
+              omClientRequest.getHostName(),
+              false);
     } else {
       resolved = resolveBucketLink(requested, new HashSet<>(),
-          null, null, null);
+          null, null, null, false);
     }
     return new ResolvedBucket(requested, resolved);
   }
 
-  public ResolvedBucket resolveBucketLink(Pair<String, String> requested)
+  public ResolvedBucket resolveBucketLink(Pair<String, String> requested,
+                                          boolean allowDanglingBuckets)
       throws IOException {
     Pair<String, String> resolved;
     if (isAclEnabled) {
@@ -4176,10 +4221,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           ugi,
           remoteIp != null ? remoteIp : omRpcAddress.getAddress(),
           remoteIp != null ? remoteIp.getHostName() :
-              omRpcAddress.getHostName());
+              omRpcAddress.getHostName(), allowDanglingBuckets);
     } else {
       resolved = resolveBucketLink(requested, new HashSet<>(),
-          null, null, null);
+          null, null, null, allowDanglingBuckets);
     }
     return new ResolvedBucket(requested, resolved);
   }
@@ -4200,11 +4245,21 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       Set<Pair<String, String>> visited,
       UserGroupInformation userGroupInformation,
       InetAddress remoteAddress,
-      String hostName) throws IOException {
+      String hostName,
+      boolean allowDanglingBuckets) throws IOException {
 
     String volumeName = volumeAndBucket.getLeft();
     String bucketName = volumeAndBucket.getRight();
-    OmBucketInfo info = bucketManager.getBucketInfo(volumeName, bucketName);
+    OmBucketInfo info;
+    try {
+      info = bucketManager.getBucketInfo(volumeName, bucketName);
+    } catch (OMException e) {
+      LOG.warn("Bucket {} not found in volume {}", bucketName, volumeName);
+      if (allowDanglingBuckets) {
+        return null;
+      }
+      throw e;
+    }
     if (!info.isLink()) {
       return volumeAndBucket;
     }
@@ -4224,7 +4279,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
     return resolveBucketLink(
         Pair.of(info.getSourceVolume(), info.getSourceBucket()),
-        visited, userGroupInformation, remoteAddress, hostName);
+        visited, userGroupInformation, remoteAddress, hostName,
+        allowDanglingBuckets);
   }
 
   @VisibleForTesting
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
index 6c33e9e00f..5a4ff64315 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
@@ -62,12 +62,34 @@ public final class OzoneManagerUtils {
    * omMetadataManager().getBucketTable().get(buckKey)
    */
 
-  private static OmBucketInfo getOmBucketInfo(OMMetadataManager metaMgr,
-      String volName, String buckName) throws IOException {
+  public static OmBucketInfo getBucketInfo(OMMetadataManager metaMgr,
+                                           String volName,
+                                           String buckName)
+      throws IOException {
     String buckKey = metaMgr.getBucketKey(volName, buckName);
-    return metaMgr.getBucketTable().get(buckKey);
+    OmBucketInfo bucketInfo = metaMgr.getBucketTable().get(buckKey);
+    if (bucketInfo == null) {
+      reportNotFound(metaMgr, volName, buckName);
+    }
+    return bucketInfo;
+  }
+
+  private static void reportNotFound(OMMetadataManager metaMgr,
+                                     String volName,
+                                     String buckName)
+      throws IOException {
+    if (!metaMgr.getVolumeTable()
+        .isExist(metaMgr.getVolumeKey(volName))) {
+      throw new OMException("Volume not found: " + volName,
+          OMException.ResultCodes.VOLUME_NOT_FOUND);
+    }
+
+    throw new OMException("Bucket not found: " + volName + "/" + buckName,
+        OMException.ResultCodes.BUCKET_NOT_FOUND);
   }
 
+
+
   /**
    * Get bucket layout for the given volume and bucket name.
    *
@@ -81,122 +103,75 @@ public final class OzoneManagerUtils {
                                              String volName,
                                              String buckName)
       throws IOException {
-    return getBucketLayout(metadataManager, volName, buckName, new HashSet<>());
+    return getResolvedBucketInfo(metadataManager, volName, buckName)
+        .getBucketLayout();
   }
 
   /**
-   * Get bucket layout for the given volume and bucket name.
+   * Get bucket info for the given volume and bucket name.
    *
    * @param metadataManager metadata manager
    * @param volName         volume name
    * @param buckName        bucket name
-   * @return bucket layout
+   * @return bucket info
    * @throws IOException
    */
-  private static BucketLayout getBucketLayout(OMMetadataManager metadataManager,
-                                              String volName,
-                                              String buckName,
-                                              Set<Pair<String, String>> visited)
+  public static OmBucketInfo getResolvedBucketInfo(
+      OMMetadataManager metadataManager,
+      String volName,
+      String buckName)
       throws IOException {
-
-    OmBucketInfo buckInfo = getOmBucketInfo(metadataManager, volName, buckName);
-
-    if (buckInfo != null) {
-      // If this is a link bucket, we fetch the BucketLayout from the
-      // source bucket.
-      if (buckInfo.isLink()) {
-        // Check if this bucket was already visited - to avoid loops
-        if (!visited.add(Pair.of(volName, buckName))) {
-          throw new OMException("Detected loop in bucket links. Bucket name: " +
-              buckName + ", Volume name: " + volName,
-              DETECTED_LOOP_IN_BUCKET_LINKS);
-        }
-        OmBucketInfo sourceBuckInfo =
-            getOmBucketInfo(metadataManager, buckInfo.getSourceVolume(),
-                buckInfo.getSourceBucket());
-        if (sourceBuckInfo != null) {
-          /** If the source bucket is again a link, we recursively resolve the
-           * link bucket.
-           *
-           * For example:
-           * buck-link1 -> buck-link2 -> buck-link3 -> buck-src
-           * buck-src has the actual BucketLayout that will be used by the
-           * links.
-           */
-          if (sourceBuckInfo.isLink()) {
-            return getBucketLayout(metadataManager,
-                sourceBuckInfo.getVolumeName(),
-                sourceBuckInfo.getBucketName(), visited);
-          }
-          return sourceBuckInfo.getBucketLayout();
-        }
-      }
-      return buckInfo.getBucketLayout();
-    }
-
-    if (!metadataManager.getVolumeTable()
-        .isExist(metadataManager.getVolumeKey(volName))) {
-      throw new OMException("Volume not found: " + volName,
-          OMException.ResultCodes.VOLUME_NOT_FOUND);
-    }
-
-    throw new OMException("Bucket not found: " + volName + "/" + buckName,
-        OMException.ResultCodes.BUCKET_NOT_FOUND);
+    return resolveBucketInfoLink(metadataManager, volName, buckName,
+        new HashSet<>());
   }
 
   /**
-   * Resolve bucket layout for a given link bucket's OmBucketInfo.
+   * Get bucket info for the given volume and bucket name, following all links
+   * and returning the last bucket in the chain.
    *
-   * @param bucketInfo
-   * @return {@code OmBucketInfo} with
-   * @throws IOException
+   * @param metadataManager metadata manager
+   * @param volName         volume name
+   * @param buckName        bucket name
+   * @return bucket info. If the bucket is a linked one,
+   * returns the info of the last one in the chain.
+   * @throws IOException if the bucket does not exist, or if it is a link and
+   * there is a loop or the link points to a missing bucket.
    */
-  public static OmBucketInfo resolveLinkBucketLayout(OmBucketInfo bucketInfo,
-                                                     OMMetadataManager
-                                                         metadataManager,
-                                                     Set<Pair<String,
-                                                         String>> visited)
+  private static OmBucketInfo resolveBucketInfoLink(
+      OMMetadataManager metadataManager,
+      String volName,
+      String buckName,
+      Set<Pair<String, String>> visited)
       throws IOException {
 
-    if (bucketInfo.isLink()) {
-      if (!visited.add(Pair.of(bucketInfo.getVolumeName(),
-          bucketInfo.getBucketName()))) {
+    OmBucketInfo buckInfo =
+        getBucketInfo(metadataManager, volName, buckName);
+
+    // If this is a link bucket, we fetch the bucket info from the
+    // source bucket.
+    if (buckInfo.isLink()) {
+      // Check if this bucket was already visited - to avoid loops
+      if (!visited.add(Pair.of(volName, buckName))) {
         throw new OMException("Detected loop in bucket links. Bucket name: " +
-            bucketInfo.getBucketName() + ", Volume name: " +
-            bucketInfo.getVolumeName(),
+            buckName + ", Volume name: " + volName,
             DETECTED_LOOP_IN_BUCKET_LINKS);
       }
-      String sourceBucketKey = metadataManager
-          .getBucketKey(bucketInfo.getSourceVolume(),
-              bucketInfo.getSourceBucket());
-      OmBucketInfo sourceBucketInfo =
-          metadataManager.getBucketTable().get(sourceBucketKey);
-
-      // If the Link Bucket's source bucket exists, we get its layout.
-      if (sourceBucketInfo != null) {
-
-        /** If the source bucket is again a link, we recursively resolve the
-         * link bucket.
-         *
-         * For example:
-         * buck-link1 -> buck-link2 -> buck-link3 -> buck-src
-         * buck-src has the actual BucketLayout that will be used by the links.
-         *
-         * Finally - we return buck-link1's OmBucketInfo, with buck-src's
-         * bucket layout.
-         */
-        if (sourceBucketInfo.isLink()) {
-          sourceBucketInfo =
-              resolveLinkBucketLayout(sourceBucketInfo, metadataManager,
-                  visited);
-        }
-
-        OmBucketInfo.Builder buckInfoBuilder = bucketInfo.toBuilder();
-        buckInfoBuilder.setBucketLayout(sourceBucketInfo.getBucketLayout());
-        bucketInfo = buckInfoBuilder.build();
+      /* If the source bucket is again a link, we recursively resolve the
+       * link bucket.
+       *
+       * For example:
+       * buck-link1 -> buck-link2 -> buck-link3 -> buck-src
+       * buck-src has the actual BucketLayout that will be used by the
+       * links.
+       */
+      try {
+        return resolveBucketInfoLink(metadataManager,
+            buckInfo.getSourceVolume(), buckInfo.getSourceBucket(), visited);
+      } catch (IOException e) {
+        throw e;
       }
     }
-    return bucketInfo;
+    return buckInfo;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
index fef9b2e35a..8796be405b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
@@ -89,6 +89,11 @@ public class ResolvedBucket {
     return !Objects.equals(requested, resolved);
   }
 
+  public boolean isDangling() {
+    return resolved == null;
+  }
+
+
   public Map<String, String> audit() {
     return audit(new LinkedHashMap<>());
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
index 415dd2a0cc..8982457e2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -23,6 +23,8 @@ import java.util.Collections;
 
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.StorageType;
@@ -51,6 +53,8 @@ import org.junit.runner.RunWith;
 import org.mockito.Mockito;
 import org.mockito.runners.MockitoJUnitRunner;
 
+import static java.util.Collections.singletonMap;
+
 /**
  * Tests BucketManagerImpl, mocks OMMetadataManager for testing.
  */
@@ -388,6 +392,91 @@ public class TestBucketManagerImpl {
     }
   }
 
+  @Test
+  public void testLinkedBucketResolution() throws Exception {
+    createSampleVol();
+    ECReplicationConfig ecConfig = new ECReplicationConfig(3, 2);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sample-vol")
+        .setBucketName("bucket-one")
+        .setDefaultReplicationConfig(
+            new DefaultReplicationConfig(
+                ecConfig))
+        .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+        .setQuotaInBytes(42 * 1024)
+        .setQuotaInNamespace(24 * 1024)
+        .setUsedBytes(10 * 1024)
+        .setUsedNamespace(5 * 1024)
+        .setStorageType(StorageType.SSD)
+        .setIsVersionEnabled(true)
+        .addAllMetadata(singletonMap("CustomKey", "CustomValue"))
+        .build();
+    writeClient.createBucket(bucketInfo);
+
+    OmBucketInfo bucketLinkInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sample-vol")
+        .setBucketName("link-one")
+        .setSourceVolume("sample-vol")
+        .setSourceBucket("bucket-one")
+        .build();
+    writeClient.createBucket(bucketLinkInfo);
+
+    OmBucketInfo bucketLink2 = OmBucketInfo.newBuilder()
+        .setVolumeName("sample-vol")
+        .setBucketName("link-two")
+        .setSourceVolume("sample-vol")
+        .setSourceBucket("link-one")
+        .build();
+    writeClient.createBucket(bucketLink2);
+
+    OmBucketInfo storedLinkBucket =
+        writeClient.getBucketInfo("sample-vol", "link-two");
+    Assert.assertNotNull("Replication config is not set",
+                         storedLinkBucket.getDefaultReplicationConfig());
+    Assert.assertEquals(ecConfig,
+                        storedLinkBucket
+                            .getDefaultReplicationConfig()
+                            .getReplicationConfig());
+
+    Assert.assertEquals(
+        "link-two", storedLinkBucket.getBucketName());
+    Assert.assertEquals(
+        "sample-vol", storedLinkBucket.getVolumeName());
+
+    Assert.assertEquals(
+        "link-one", storedLinkBucket.getSourceBucket());
+    Assert.assertEquals(
+        "sample-vol", storedLinkBucket.getSourceVolume());
+
+    Assert.assertEquals(
+        bucketInfo.getBucketLayout(),
+        storedLinkBucket.getBucketLayout());
+    Assert.assertEquals(
+        bucketInfo.getQuotaInBytes(),
+        storedLinkBucket.getQuotaInBytes());
+    Assert.assertEquals(
+        bucketInfo.getQuotaInNamespace(),
+        storedLinkBucket.getQuotaInNamespace());
+    Assert.assertEquals(
+        bucketInfo.getUsedBytes(),
+        storedLinkBucket.getUsedBytes());
+    Assert.assertEquals(
+        bucketInfo.getUsedNamespace(),
+        storedLinkBucket.getUsedNamespace());
+    Assert.assertEquals(
+        bucketInfo.getDefaultReplicationConfig(),
+        storedLinkBucket.getDefaultReplicationConfig());
+    Assert.assertEquals(
+        bucketInfo.getMetadata(),
+        storedLinkBucket.getMetadata());
+    Assert.assertEquals(
+        bucketInfo.getStorageType(),
+        storedLinkBucket.getStorageType());
+    Assert.assertEquals(
+        bucketInfo.getIsVersionEnabled(),
+        storedLinkBucket.getIsVersionEnabled());
+  }
+
   private BucketLayout getBucketLayout() {
     return BucketLayout.DEFAULT;
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
index e877597d17..fabc7f456a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.debug;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.Callable;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManagerUtils;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.WithParentObjectId;
@@ -147,17 +147,16 @@ public class PrefixParser implements Callable<Void>, SubcommandWithParent {
 
     parserStats[Types.VOLUME.ordinal()]++;
     // First get the info about the bucket
-    String bucketKey = metadataManager.getBucketKey(vol, buck);
-    OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
-    if (info == null) {
+    OmBucketInfo info;
+    try {
+      info = OzoneManagerUtils
+          .getResolvedBucketInfo(metadataManager, vol, buck);
+    } catch (OMException e) {
       System.out.println("Invalid Bucket:" + buck);
       metadataManager.stop();
       return;
     }
-
-    BucketLayout bucketLayout =
-        OzoneManagerUtils.resolveLinkBucketLayout(info, metadataManager,
-            new HashSet<>()).getBucketLayout();
+    BucketLayout bucketLayout = info.getBucketLayout();
 
     if (!bucketLayout.isFileSystemOptimized()) {
       System.out.println("Prefix tool only works for FileSystem Optimized" +
@@ -171,7 +170,8 @@ public class PrefixParser implements Callable<Void>, SubcommandWithParent {
     long lastObjectId = info.getObjectID();
     WithParentObjectId objectBucketId = new WithParentObjectId();
     objectBucketId.setObjectID(lastObjectId);
-    dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
+    dumpInfo(Types.BUCKET, effectivePath, objectBucketId,
+        metadataManager.getBucketKey(vol, buck));
 
     Iterator<Path> pathIterator =  p.iterator();
     while (pathIterator.hasNext()) {
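
The new server-side pieces above fit together as follows: OzoneManager#resolveBucketLink(Pair, true) tolerates dangling links and returns a ResolvedBucket whose isDangling() reports whether the chain ended at a missing bucket, while OzoneManagerUtils#getResolvedBucketInfo follows the whole link chain and still throws on loops or missing buckets. Below is a minimal sketch of the dangling-link pattern used by OzoneManager#getBucketInfo, assuming an OzoneManager instance om and a BucketManager bucketManager; the helper name and its parameters are hypothetical.

// Illustrative helper, mirroring the pattern in OzoneManager#getBucketInfo;
// not part of this patch.
private OmBucketInfo infoFollowingLink(OzoneManager om,
    BucketManager bucketManager, OmBucketInfo linkInfo) throws IOException {
  ResolvedBucket resolved = om.resolveBucketLink(
      Pair.of(linkInfo.getSourceVolume(), linkInfo.getSourceBucket()), true);
  if (resolved.isDangling()) {
    // The source chain ends at a deleted bucket: keep the link's own info.
    return linkInfo;
  }
  // Otherwise read the real bucket so its properties can be merged into
  // the link bucket's reply.
  return bucketManager.getBucketInfo(
      resolved.realVolume(), resolved.realBucket());
}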


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
