mhansonp commented on a change in pull request #7124:
URL: https://github.com/apache/geode/pull/7124#discussion_r780591300



##########
File path: geode-core/src/distributedTest/java/org/apache/geode/internal/cache/control/RebalanceOperationComplexPart2DistributedTest.java
##########
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.control;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.geode.distributed.ConfigurationProperties.REDUNDANCY_ZONE;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.client.ClientCache;
+import org.apache.geode.cache.client.ClientRegionFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.control.ResourceManager;
+import org.apache.geode.internal.cache.PartitionAttributesImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+
+/**
+ * The purpose of RebalanceOperationComplexPart2DistributedTest is to test rebalances
+ * across zones and to ensure that the enforceUniqueZone behavior of redundancy zones
+ * works correctly.
+ */
+public class RebalanceOperationComplexPart2DistributedTest
+    implements Serializable {
+
+  private static final int EXPECTED_BUCKET_COUNT = 113;
+  private static final long TIMEOUT_SECONDS = GeodeAwaitility.getTimeout().getSeconds();
+  private static final String REGION_NAME = "primary";
+  private static final Logger logger = LogService.getLogger();
+
+  private static final String ZONE_A = "zoneA";
+  private static final String ZONE_B = "zoneB";
+
+  private int locatorPort;
+  private static final AtomicInteger runID = new AtomicInteger(0);
+  private String workingDir;
+
+  // Servers distributed evenly across the 2 redundancy zones
+  private static Map<Integer, String> SERVER_ZONE_MAP;
+
+  @Rule
+  public ClusterStartupRule clusterStartupRule = new ClusterStartupRule(5);
+
+  @Before
+  public void setup() {
+    // Start the locator
+    MemberVM locatorVM = clusterStartupRule.startLocatorVM(0);
+    locatorPort = locatorVM.getPort();
+
+    workingDir = clusterStartupRule.getWorkingDirRoot().getAbsolutePath();
+
+    runID.incrementAndGet();
+  }
+
+  @After
+  public void after() {
+    stopServersAndDeleteDirectories();
+  }
+
+  protected void stopServersAndDeleteDirectories() {
+    for (Map.Entry<Integer, String> entry : SERVER_ZONE_MAP.entrySet()) {
+      clusterStartupRule.stop(entry.getKey(), true);
+    }
+    cleanOutServerDirectories();
+  }
+
+  /**
+   * Test that we correctly use the redundancy-zone property to determine where to place redundant
+   * copies of buckets, and that deletes across redundancy zones are not allowed. This test does not
+   * run a rebalance once the servers are down.
+   *
+   */
+  @Test
+  public void testRecoveryWithOneServerPermanentlyDownAndOneRestarted() throws Exception {
+
+    SERVER_ZONE_MAP = new HashMap<Integer, String>() {
+      {
+        put(1, ZONE_A);
+        put(2, ZONE_A);
+        put(3, ZONE_B);
+        put(4, ZONE_B);
+      }
+    };
+
+    cleanOutServerDirectories();

Review comment:
       Got rid of the cleanout method so this is moot.
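For readers who only see this hunk: cleanOutServerDirectories is referenced in the @After path above, but its body is outside the quoted range (and per the comment it has since been removed). A rough sketch of what such a helper usually does in these DUnit tests follows; the vm<index> directory naming under the ClusterStartupRule working-dir root is an assumption for illustration, not code from this PR.

// Hypothetical sketch (the real method is not shown here and was removed per the comment):
// delete each server's working directory so a restarted member starts from a clean disk.
private void cleanOutServerDirectories() {
  for (int index : SERVER_ZONE_MAP.keySet()) {
    File serverDir = new File(workingDir, "vm" + index);
    if (serverDir.exists()) {
      logger.info("Deleting server directory {}", serverDir);
      FileUtils.deleteQuietly(serverDir); // commons-io, already imported in the test
    }
  }
}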



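Separately, the quoted hunk stops before the servers are started, so the redundancy-zone wiring that the class javadoc refers to is not visible above. A minimal sketch of what that wiring typically looks like with ClusterStartupRule is below; the helper name startServerInZone and the Properties-based startServerVM overload are illustrative assumptions, not code from this PR.

// Hypothetical helper (not from this PR): start one server VM and pin it to a
// redundancy zone via the "redundancy-zone" distributed system property.
private MemberVM startServerInZone(int index, String zone) {
  Properties properties = new Properties();
  properties.setProperty(REDUNDANCY_ZONE, zone); // e.g. ZONE_A or ZONE_B
  return clusterStartupRule.startServerVM(index, properties, locatorPort);
}

Each entry of SERVER_ZONE_MAP would be started this way, giving two members per zone, which is what lets the test assert that redundant bucket copies land in a different zone than their primaries instead of doubling up inside one zone.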

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

