HDDS-354. VolumeInfo.getScmUsed throws NPE. Contributed by Hanisha Koneru.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a07617f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a07617f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a07617f

Branch: refs/heads/HEAD
Commit: 2a07617f852ceddcf6b38ddcefd912fd953823d9
Parents: e6b77ad
Author: Bharat Viswanadham <bha...@apache.org>
Authored: Thu Oct 4 21:39:29 2018 -0700
Committer: Bharat Viswanadham <bha...@apache.org>
Committed: Thu Oct 4 21:46:17 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  11 --
 .../states/endpoint/VersionEndpointTask.java    |  44 +++--
 .../container/common/utils/HddsVolumeUtil.java  |   2 +-
 .../container/common/volume/VolumeSet.java      | 178 ++++++++++---------
 .../container/keyvalue/KeyValueContainer.java   |   6 +-
 .../container/keyvalue/KeyValueHandler.java     |   4 +-
 6 files changed, 129 insertions(+), 116 deletions(-)
----------------------------------------------------------------------
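
At its core, the patch replaces VolumeSet's exclusive AutoCloseableLock (wrapping an InstrumentedLock) with a java.util.concurrent.locks.ReentrantReadWriteLock, so read-only paths such as getNodeReport() can proceed concurrently while mutations (addVolume, failVolume, removeVolume) remain exclusive. A minimal sketch of the pattern the patch adopts; the class and method bodies here are illustrative, not copied from the patch:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class RwGuardedSet {
      private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

      void readPath() {
        rwLock.readLock().lock();       // shared: many readers at once
        try {
          // iterate volumeMap / volumeStateMap safely here
        } finally {
          rwLock.readLock().unlock();   // always release in finally
        }
      }

      void writePath() {
        rwLock.writeLock().lock();      // exclusive: blocks readers and writers
        try {
          // mutate volumeMap / failedVolumeMap / volumeStateMap here
        } finally {
          rwLock.writeLock().unlock();
        }
      }
    }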


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 599b4e8..e8aa22c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -286,17 +286,6 @@ public final class OzoneConfigKeys {
   public static final double
       HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.75;
 
-  public static final String
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
-      "hdds.write.lock.reporting.threshold.ms";
-  public static final long
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
-  public static final String
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_KEY =
-      "hdds.lock.suppress.warning.interval.ms";
-  public static final long
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT = 10000L;
-
   public static final String OZONE_CONTAINER_COPY_WORKDIR =
       "hdds.datanode.replication.work.dir";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 64e078d..2d04677 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -69,31 +69,39 @@ public class VersionEndpointTask implements
       VersionResponse response = VersionResponse.getFromProtobuf(
           versionResponse);
       rpcEndPoint.setVersion(response);
-      VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-      Map<String, HddsVolume> volumeMap = volumeSet.getVolumeMap();
 
       String scmId = response.getValue(OzoneConsts.SCM_ID);
       String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
 
-      Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
-          "null");
-      Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
-          "cannot be null");
+      // Check volumes
+      VolumeSet volumeSet = ozoneContainer.getVolumeSet();
+      volumeSet.readLock();
+      try {
+        Map<String, HddsVolume> volumeMap = volumeSet.getVolumeMap();
+
+        Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
+            "null");
+        Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
+            "cannot be null");
 
-      // If version file does not exist create version file and also set scmId
-      for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-        HddsVolume hddsVolume = entry.getValue();
-        boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
-            clusterId, LOG);
-        if (!result) {
-          volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+        // If version file does not exist create version file and also set scmId
+        for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
+          HddsVolume hddsVolume = entry.getValue();
+          boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
+              clusterId, LOG);
+          if (!result) {
+            volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+          }
         }
+        if (volumeSet.getVolumesList().size() == 0) {
+          // All volumes are inconsistent state
+          throw new DiskOutOfSpaceException("All configured Volumes are in " +
+              "Inconsistent State");
+        }
+      } finally {
+        volumeSet.readUnlock();
       }
-      if (volumeSet.getVolumesList().size() == 0) {
-        // All volumes are inconsistent state
-        throw new DiskOutOfSpaceException("All configured Volumes are in " +
-            "Inconsistent State");
-      }
+
       ozoneContainer.getDispatcher().setScmId(scmId);
 
       EndpointStateMachine.EndPointStates nextState =
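
One caveat worth noting in this hunk: failVolume() acquires the VolumeSet write lock in the new code (see the VolumeSet diff below), yet it is called here while the read lock is still held. ReentrantReadWriteLock does not support upgrading a read lock to a write lock, so if checkVolume() ever fails on this path, the thread would block on writeLock() while still holding its own read lock. A condensed, hypothetical demonstration of that JDK behavior:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class UpgradeBlocks {
      public static void main(String[] args) {
        ReentrantReadWriteLock rw = new ReentrantReadWriteLock();
        rw.readLock().lock();
        // Read-to-write upgrade is not supported: the same thread,
        // still holding the read lock, parks here indefinitely.
        rw.writeLock().lock();
        System.out.println("never reached");
      }
    }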

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index bc0bd05..cb356da 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -164,7 +164,7 @@ public final class HddsVolumeUtil {
   }
 
   /**
-   * Check Volume is consistent state or not.
+   * Check Volume is in consistent state or not.
    * @param hddsVolume
    * @param scmId
    * @param clusterId

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 06f48fc..5b6b823 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -33,15 +33,11 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
-import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.InstrumentedLock;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,8 +49,7 @@ import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * VolumeSet to manage volumes in a DataNode.
@@ -84,11 +79,12 @@ public class VolumeSet {
   private EnumMap<StorageType, List<HddsVolume>> volumeStateMap;
 
   /**
-   * Lock to synchronize changes to the VolumeSet. Any update to
-   * {@link VolumeSet#volumeMap}, {@link VolumeSet#failedVolumeMap}, or
-   * {@link VolumeSet#volumeStateMap} should be done after acquiring this lock.
+   * A Reentrant Read Write Lock to synchronize volume operations in VolumeSet.
+   * Any update to {@link VolumeSet#volumeMap},
+   * {@link VolumeSet#failedVolumeMap}, or {@link VolumeSet#volumeStateMap}
+   * should be done after acquiring the write lock.
    */
-  private final AutoCloseableLock volumeSetLock;
+  private final ReentrantReadWriteLock volumeSetRWLock;
 
   private final String datanodeUuid;
   private String clusterID;
@@ -105,17 +101,7 @@ public class VolumeSet {
     this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
     this.conf = conf;
-    this.volumeSetLock = new AutoCloseableLock(
-        new InstrumentedLock(getClass().getName(), LOG,
-            new ReentrantLock(true),
-            conf.getTimeDuration(
-                OzoneConfigKeys.HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
-                OzoneConfigKeys.HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT,
-                TimeUnit.MILLISECONDS),
-            conf.getTimeDuration(
-                OzoneConfigKeys.HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_KEY,
-                OzoneConfigKeys.HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT,
-                TimeUnit.MILLISECONDS)));
+    this.volumeSetRWLock = new ReentrantReadWriteLock();
 
     initializeVolumeSet();
   }
@@ -198,14 +184,35 @@ public class VolumeSet {
     }
   }
 
-  public void acquireLock() {
-    volumeSetLock.acquire();
+  /**
+   * Acquire Volume Set Read lock.
+   */
+  public void readLock() {
+    volumeSetRWLock.readLock().lock();
+  }
+
+  /**
+   * Release Volume Set Read lock.
+   */
+  public void readUnlock() {
+    volumeSetRWLock.readLock().unlock();
   }
 
-  public void releaseLock() {
-    volumeSetLock.release();
+  /**
+   * Acquire Volume Set Write lock.
+   */
+  public void writeLock() {
+    volumeSetRWLock.writeLock().lock();
+  }
+
+  /**
+   * Release Volume Set Write lock.
+   */
+  public void writeUnlock() {
+    volumeSetRWLock.writeLock().unlock();
   }
 
+
   private HddsVolume createVolume(String locationString,
       StorageType storageType) throws IOException {
     HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(locationString)
@@ -227,7 +234,8 @@ public class VolumeSet {
     String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot);
     boolean success;
 
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
+    this.writeLock();
+    try {
       if (volumeMap.containsKey(hddsRoot)) {
         LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot);
         success = false;
@@ -247,6 +255,8 @@ public class VolumeSet {
     } catch (IOException ex) {
       LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex);
       success = false;
+    } finally {
+      this.writeUnlock();
     }
     return success;
   }
@@ -255,7 +265,8 @@ public class VolumeSet {
   public void failVolume(String dataDir) {
     String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
 
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
+    this.writeLock();
+    try {
       if (volumeMap.containsKey(hddsRoot)) {
         HddsVolume hddsVolume = volumeMap.get(hddsRoot);
         hddsVolume.failVolume();
@@ -270,6 +281,8 @@ public class VolumeSet {
       } else {
         LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
       }
+    } finally {
+      this.writeUnlock();
     }
   }
 
@@ -277,7 +290,8 @@ public class VolumeSet {
   public void removeVolume(String dataDir) throws IOException {
     String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir);
 
-    try (AutoCloseableLock lock = volumeSetLock.acquire()) {
+    this.writeLock();
+    try {
       if (volumeMap.containsKey(hddsRoot)) {
         HddsVolume hddsVolume = volumeMap.get(hddsRoot);
         hddsVolume.shutdown();
@@ -295,14 +309,11 @@ public class VolumeSet {
       } else {
         LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot);
       }
+    } finally {
+      this.writeUnlock();
     }
   }
 
-  public HddsVolume chooseVolume(long containerSize,
-      VolumeChoosingPolicy choosingPolicy) throws IOException {
-    return choosingPolicy.chooseVolume(getVolumesList(), containerSize);
-  }
-
   /**
    * This method, call shutdown on each volume to shutdown volume usage
    * thread and write scmUsed on each volume.
@@ -352,55 +363,60 @@ public class VolumeSet {
   public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
       throws IOException {
     boolean failed;
-    StorageLocationReport[] reports = new StorageLocationReport[volumeMap
-        .size() + failedVolumeMap.size()];
-    int counter = 0;
-    HddsVolume hddsVolume;
-    for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-      hddsVolume = entry.getValue();
-      VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
-      long scmUsed = 0;
-      long remaining = 0;
-      failed = false;
-      try {
-        scmUsed = volumeInfo.getScmUsed();
-        remaining = volumeInfo.getAvailable();
-      } catch (IOException ex) {
-        LOG.warn("Failed to get scmUsed and remaining for container " +
-            "storage location {}", volumeInfo.getRootDir());
-        // reset scmUsed and remaining if df/du failed.
-        scmUsed = 0;
-        remaining = 0;
-        failed = true;
-      }
+    this.readLock();
+    try {
+      StorageLocationReport[] reports = new StorageLocationReport[volumeMap
+          .size() + failedVolumeMap.size()];
+      int counter = 0;
+      HddsVolume hddsVolume;
+      for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
+        hddsVolume = entry.getValue();
+        VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
+        long scmUsed = 0;
+        long remaining = 0;
+        failed = false;
+        try {
+          scmUsed = volumeInfo.getScmUsed();
+          remaining = volumeInfo.getAvailable();
+        } catch (IOException ex) {
+          LOG.warn("Failed to get scmUsed and remaining for container " +
+              "storage location {}", volumeInfo.getRootDir());
+          // reset scmUsed and remaining if df/du failed.
+          scmUsed = 0;
+          remaining = 0;
+          failed = true;
+        }
 
-      StorageLocationReport.Builder builder =
-          StorageLocationReport.newBuilder();
-      builder.setStorageLocation(volumeInfo.getRootDir())
-          .setId(hddsVolume.getStorageID())
-          .setFailed(failed)
-          .setCapacity(hddsVolume.getCapacity())
-          .setRemaining(remaining)
-          .setScmUsed(scmUsed)
-          .setStorageType(hddsVolume.getStorageType());
-      StorageLocationReport r = builder.build();
-      reports[counter++] = r;
-    }
-    for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
-      hddsVolume = entry.getValue();
-      StorageLocationReport.Builder builder = StorageLocationReport
-          .newBuilder();
-      builder.setStorageLocation(hddsVolume.getHddsRootDir()
-          .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
-          .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
-              hddsVolume.getStorageType());
-      StorageLocationReport r = builder.build();
-      reports[counter++] = r;
-    }
-    NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
-    for (int i = 0; i < reports.length; i++) {
-      nrb.addStorageReport(reports[i].getProtoBufMessage());
+        StorageLocationReport.Builder builder =
+            StorageLocationReport.newBuilder();
+        builder.setStorageLocation(volumeInfo.getRootDir())
+            .setId(hddsVolume.getStorageID())
+            .setFailed(failed)
+            .setCapacity(hddsVolume.getCapacity())
+            .setRemaining(remaining)
+            .setScmUsed(scmUsed)
+            .setStorageType(hddsVolume.getStorageType());
+        StorageLocationReport r = builder.build();
+        reports[counter++] = r;
+      }
+      for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
+        hddsVolume = entry.getValue();
+        StorageLocationReport.Builder builder = StorageLocationReport
+            .newBuilder();
+        builder.setStorageLocation(hddsVolume.getHddsRootDir()
+            .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
+            .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
+            hddsVolume.getStorageType());
+        StorageLocationReport r = builder.build();
+        reports[counter++] = r;
+      }
+      NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
+      for (int i = 0; i < reports.length; i++) {
+        nrb.addStorageReport(reports[i].getProtoBufMessage());
+      }
+      return nrb.build();
+    } finally {
+      this.readUnlock();
     }
-    return nrb.build();
   }
 }
\ No newline at end of file
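
With chooseVolume() removed from VolumeSet, callers now invoke the choosing policy themselves while holding the read lock, as KeyValueContainer and KeyValueHandler do below. A sketch of the calling convention the new public lock methods establish (volumeSet, volumeChoosingPolicy and maxSize stand in for real instances and values):

    // Hypothetical caller, mirroring the try/finally discipline in the patch.
    volumeSet.readLock();
    try {
      HddsVolume volume = volumeChoosingPolicy.chooseVolume(
          volumeSet.getVolumesList(), maxSize);
      // Use the chosen volume; the read lock guarantees the volume
      // lists are not mutated underneath this thread.
    } finally {
      volumeSet.readUnlock();
    }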

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 09d4054..e5b344d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -108,8 +108,8 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     Preconditions.checkNotNull(scmId, "scmId cannot be null");
 
     File containerMetaDataPath = null;
-    //acquiring volumeset lock and container lock
-    volumeSet.acquireLock();
+    //acquiring volumeset read lock
+    volumeSet.readLock();
     long maxSize = containerData.getMaxSize();
     try {
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
@@ -166,7 +166,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
       throw new StorageContainerException("Container creation failed.", ex,
           CONTAINER_INTERNAL_ERROR);
     } finally {
-      volumeSet.releaseLock();
+      volumeSet.readUnlock();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a07617f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 5be6e28..922db2a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -271,14 +271,14 @@ public class KeyValueHandler extends Handler {
 
   public void populateContainerPathFields(KeyValueContainer container,
       long maxSize) throws IOException {
-    volumeSet.acquireLock();
+    volumeSet.readLock();
     try {
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
           .getVolumesList(), maxSize);
       String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
       container.populatePathFields(scmID, containerVolume, hddsVolumeDir);
     } finally {
-      volumeSet.releaseLock();
+      volumeSet.readUnlock();
     }
   }
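
For context on the subject line: VolumeInfo.getScmUsed() can throw an NPE when getNodeReport() races with a volume being failed or removed, since shutting a volume down also shuts down its usage tracker. The old code did not guard getNodeReport() at all; the new code holds the read lock for the whole report, so no writer can fail or remove a volume mid-iteration. A condensed illustration of the assumed race (field and method names here are illustrative, as the relevant VolumeInfo internals are not part of this diff):

    // Sketch of the unguarded race this patch closes.
    class VolumeInfoSketch {
      private UsageTracker usage;          // assumed: nulled on shutdown

      long getScmUsed() {
        return usage.getUsed();            // NPE if shutdown won the race
      }

      void shutdown() {
        usage.stop();
        usage = null;                      // a concurrent reader then dereferences null
      }
    }

    interface UsageTracker { long getUsed(); void stop(); }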
 

