This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new aef6a4f  HDDS-1950. S3 MPU part-list call fails if there are no parts
aef6a4f is described below

commit aef6a4fe0d04fe0d42fa36dc04cac2cc53ae8efd
Author: Márton Elek <e...@apache.org>
AuthorDate: Sun Aug 11 14:32:00 2019 +0200

    HDDS-1950. S3 MPU part-list call fails if there are no parts
    
    Signed-off-by: Anu Engineer <aengin...@apache.org>
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 ++++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 +++++++++++++++++++++
 2 files changed, 133 insertions(+), 2 deletions(-)
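
For context on the failure named in the subject line: the first hunk below removes a call to partKeyInfoMap.firstEntry().getValue(), and NavigableMap.firstEntry() returns null on an empty map, so listing the parts of a multipart upload with no committed parts most likely ended in a NullPointerException. The fragment below is a standalone sketch of that Java-level behaviour, detached from the Ozone types:

    // Illustration only, not Ozone code: firstEntry() on an empty NavigableMap
    // returns null, so chaining .getValue() onto it throws an NPE.
    import java.util.TreeMap;

    public final class EmptyFirstEntrySketch {
      public static void main(String[] args) {
        TreeMap<Integer, String> parts = new TreeMap<>();
        System.out.println(parts.firstEntry());   // prints "null"
        try {
          parts.firstEntry().getValue();          // NullPointerException
        } catch (NullPointerException e) {
          System.out.println("a part-list call would fail here: " + e);
        }
      }
    }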

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index b58095f..4f56160 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1298,8 +1298,9 @@ public class KeyManagerImpl implements KeyManager {
             multipartKeyInfo.getPartKeyInfoMap();
         Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator =
             partKeyInfoMap.entrySet().iterator();
-        HddsProtos.ReplicationType replicationType =
-            partKeyInfoMap.firstEntry().getValue().getPartKeyInfo().getType();
+
+        HddsProtos.ReplicationType replicationType = null;
+
         int count = 0;
         List<OmPartInfo> omPartInfoList = new ArrayList<>();
 
@@ -1316,11 +1317,30 @@ public class KeyManagerImpl implements KeyManager {
                 partKeyInfo.getPartKeyInfo().getModificationTime(),
                 partKeyInfo.getPartKeyInfo().getDataSize());
             omPartInfoList.add(omPartInfo);
+
+            //if there are parts, use replication type from one of the parts
             replicationType = partKeyInfo.getPartKeyInfo().getType();
             count++;
           }
         }
 
+        if (replicationType == null) {
+          //if there are no parts, use the replicationType from the open key.
+
+          OmKeyInfo omKeyInfo =
+              metadataManager.getOpenKeyTable().get(multipartKey);
+
+          if (omKeyInfo == null) {
+            throw new IllegalStateException(
+                "Open key is missing for multipart upload " + multipartKey);
+          }
+
+          replicationType = omKeyInfo.getType();
+
+        }
+        Preconditions.checkNotNull(replicationType,
+            "Replication type can't be identified");
+
         if (partKeyInfoMapIterator.hasNext()) {
           Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry =
               partKeyInfoMapIterator.next();
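
Taken together, the hunk above no longer assumes that a first part exists: the loop keeps the replication type of the last part it visits, and only when no part was found does it consult the open key table, throwing an IllegalStateException if even the open key is gone. A minimal standalone sketch of that resolution order, using illustrative stand-in types rather than the real PartKeyInfo/OmKeyInfo classes:

    // Sketch of the fallback order introduced above (stand-in types only).
    import java.util.List;
    import java.util.Optional;
    import java.util.function.Supplier;

    public final class ReplicationTypeFallbackSketch {

      enum ReplicationType { RATIS, STAND_ALONE }

      static ReplicationType resolve(List<ReplicationType> partTypes,
          Supplier<Optional<ReplicationType>> openKeyType, String multipartKey) {
        if (!partTypes.isEmpty()) {
          // parts exist: take the replication type recorded on one of them
          return partTypes.get(partTypes.size() - 1);
        }
        // no parts yet: fall back to the open key written at initiate time
        return openKeyType.get().orElseThrow(() -> new IllegalStateException(
            "Open key is missing for multipart upload " + multipartKey));
      }
    }
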
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
new file mode 100644
index 0000000..a5a446c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
+import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Unit test key manager.
+ */
+public class TestKeyManagerUnit {
+
+  private OmMetadataManagerImpl metadataManager;
+  private KeyManagerImpl keyManager;
+
+  @Before
+  public void setup() throws IOException {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+        GenericTestUtils.getRandomizedTestDir().toString());
+    metadataManager = new OmMetadataManagerImpl(configuration);
+    keyManager = new KeyManagerImpl(
+        Mockito.mock(ScmBlockLocationProtocol.class),
+        metadataManager,
+        configuration,
+        "omtest",
+        Mockito.mock(OzoneBlockTokenSecretManager.class)
+    );
+  }
+
+  @Test
+  public void listMultipartUploadPartsWithZeroUpload() throws IOException {
+    //GIVEN
+    createBucket(metadataManager, "vol1", "bucket1");
+
+    OmMultipartInfo omMultipartInfo =
+        initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
+
+    //WHEN
+    OmMultipartUploadListParts omMultipartUploadListParts = keyManager
+        .listParts("vol1", "bucket1", "dir/key1", 
omMultipartInfo.getUploadID(),
+            0, 10);
+
+    Assert.assertEquals(0,
+        omMultipartUploadListParts.getPartInfoList().size());
+
+  }
+
+  private void createBucket(OmMetadataManagerImpl omMetadataManager,
+      String volume, String bucket)
+      throws IOException {
+    omMetadataManager.getBucketTable()
+        .put(omMetadataManager.getBucketKey(volume, bucket),
+            OmBucketInfo.newBuilder()
+                .setVolumeName(volume)
+                .setBucketName(bucket)
+                .setStorageType(StorageType.DISK)
+                .setIsVersionEnabled(false)
+                .setAcls(new ArrayList<>())
+                .build());
+  }
+
+  private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest,
+      String volume, String bucket, String key)
+      throws IOException {
+    OmKeyArgs key1 = new Builder()
+        .setVolumeName(volume)
+        .setBucketName(bucket)
+        .setKeyName(key)
+        .setType(ReplicationType.RATIS)
+        .setFactor(ReplicationFactor.THREE)
+        .setAcls(new ArrayList<>())
+        .build();
+    return omtest.initiateMultipartUpload(key1);
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
