Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEM-983 8165e6047 -> 4b80441bc


back to a volatile boolean so that a second shutDownAll (SDA) will wait for the first


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/4b80441b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/4b80441b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/4b80441b

Branch: refs/heads/feature/GEM-983
Commit: 4b80441bc0c7f69285bbcf47a4314820525a4051
Parents: 8165e60
Author: Darrel Schneider <dschnei...@pivotal.io>
Authored: Wed Oct 12 14:50:09 2016 -0700
Committer: Darrel Schneider <dschnei...@pivotal.io>
Committed: Wed Oct 12 14:50:09 2016 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/GemFireCacheImpl.java  | 41 +++++++++-----------
 1 file changed, 19 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/4b80441b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
index 0e23aaa..20eed44 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java
@@ -523,7 +523,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
 
   private final Object clientMetaDatServiceLock = new Object();
 
-  private final AtomicBoolean isShutDownAll = new AtomicBoolean(false);
+  private volatile boolean isShutDownAll = false;
 
   private final ResourceAdvisor resourceAdvisor;
   private final JmxManagerAdvisor jmxAdvisor;
@@ -1641,7 +1641,7 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
   }
 
   public boolean isCacheAtShutdownAll() {
-    return isShutDownAll.get();
+    return isShutDownAll;
   }
 
   /**
@@ -1656,29 +1656,26 @@ public class GemFireCacheImpl implements InternalCache, ClientCache, HasCachePer
   }
 
   public void shutDownAll() {
-    boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");
+    synchronized (GemFireCacheImpl.class) {
+      boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");
 
-    if (testIGE) {
-      InternalGemFireError assErr = new InternalGemFireError(LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
-      throw assErr;
-    }
-    if (isCacheAtShutdownAll()) {
-      // it's already doing shutdown by another thread
-      return;
-    }
-    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
-      try {
-        CacheObserverHolder.getInstance().beforeShutdownAll();
-      } finally {
-        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
+      if (testIGE) {
+        InternalGemFireError assErr = new InternalGemFireError(LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
+        throw assErr;
       }
-    }
-    if (!this.isShutDownAll.compareAndSet(false, true)) {
-      // it's already doing shutdown by another thread
-      return;
-    }
+      if (isCacheAtShutdownAll()) {
+        // it's already doing shutdown by another thread
+        return;
+      }
+      if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
+        try {
+          CacheObserverHolder.getInstance().beforeShutdownAll();
+        } finally {
+          LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
+        }
+      }
+      this.isShutDownAll = true;
 
-    synchronized (GemFireCacheImpl.class) {
       // bug 44031 requires multithread shutdownall should be grouped
      // by root region. However, shutDownAllDuringRecovery.conf test revealed that
       // we have to close colocated child regions first.
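
For context, here is a minimal standalone sketch (not actual GemFireCacheImpl code; the class ShutDownAllSketch and the helper doShutdownWork are hypothetical) contrasting the two approaches this commit switches between. With AtomicBoolean.compareAndSet, a second caller of shutDownAll returns immediately while the first caller may still be tearing the cache down; with a volatile flag checked and set inside a class-level synchronized block, the second caller blocks on the monitor until the first caller's shutdown has completed.

import java.util.concurrent.atomic.AtomicBoolean;

public class ShutDownAllSketch {

  // Old style: the CAS lets the second caller return right away,
  // possibly while the first caller is still doing shutdown work.
  private static final AtomicBoolean casFlag = new AtomicBoolean(false);

  static void shutDownAllWithCas() {
    if (!casFlag.compareAndSet(false, true)) {
      return; // second caller does NOT wait for the first to finish
    }
    doShutdownWork("CAS caller");
  }

  // New style (as in the diff): a volatile flag plus a class-level lock.
  // The second caller blocks until the first caller releases the monitor,
  // then sees isShutDownAll == true and returns.
  private static volatile boolean isShutDownAll = false;

  static void shutDownAllWithLock() {
    synchronized (ShutDownAllSketch.class) {
      if (isShutDownAll) {
        return; // the first shutdown already finished before we got the lock
      }
      isShutDownAll = true;
      doShutdownWork("locked caller");
    }
  }

  private static void doShutdownWork(String who) {
    System.out.println(who + " starts shutdown");
    try {
      Thread.sleep(500); // stand-in for closing regions, disk stores, etc.
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    System.out.println(who + " finishes shutdown");
  }

  public static void main(String[] args) throws InterruptedException {
    Thread first = new Thread(ShutDownAllSketch::shutDownAllWithLock);
    Thread second = new Thread(ShutDownAllSketch::shutDownAllWithLock);
    first.start();
    second.start();
    first.join();
    second.join();
    // With shutDownAllWithLock, only one thread prints "starts shutdown" and the
    // other returns only after that work is done. With shutDownAllWithCas, the
    // second thread would return immediately while shutdown is still in progress.
  }
}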
