[ https://issues.apache.org/jira/browse/GEODE-8864?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17291699#comment-17291699 ]

ASF GitHub Bot commented on GEODE-8864:
---------------------------------------

jhutchison commented on a change in pull request #5954:
URL: https://github.com/apache/geode/pull/5954#discussion_r583259966



##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.hash;
+
+import static org.apache.geode.distributed.ConfigurationProperties.MAX_WAIT_TIME_RECONNECT;
+import static org.apache.geode.distributed.ConfigurationProperties.REDIS_BIND_ADDRESS;
+import static org.apache.geode.distributed.ConfigurationProperties.REDIS_ENABLED;
+import static org.apache.geode.distributed.ConfigurationProperties.REDIS_PORT;
+import static org.apache.geode.redis.internal.GeodeRedisServer.ENABLE_REDIS_UNSUPPORTED_COMMANDS_PARAM;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import io.lettuce.core.ClientOptions;
+import io.lettuce.core.MapScanCursor;
+import io.lettuce.core.RedisClient;
+import io.lettuce.core.RedisCommandExecutionException;
+import io.lettuce.core.RedisException;
+import io.lettuce.core.ScanCursor;
+import io.lettuce.core.api.StatefulRedisConnection;
+import io.lettuce.core.api.sync.RedisCommands;
+import io.lettuce.core.resource.ClientResources;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.control.RebalanceFactory;
+import org.apache.geode.cache.control.ResourceManager;
+import org.apache.geode.cache.execute.FunctionException;
+import org.apache.geode.distributed.ConfigurationProperties;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.redis.session.springRedisTestApplication.config.DUnitSocketAddressResolver;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+
+public class HScanDunitTest {
+  @ClassRule
+  public static ClusterStartupRule clusterStartUp = new ClusterStartupRule(4);
+
+  @Rule
+  public ExecutorServiceRule executor = new ExecutorServiceRule();
+
+  @ClassRule
+  public static GfshCommandRule gfsh = new GfshCommandRule();
+
+  private static RedisCommands<String, String> commands;
+  private RedisClient redisClient;
+  private StatefulRedisConnection<String, String> connection;
+  private static Properties locatorProperties;
+
+  private static MemberVM locator;
+  private static MemberVM server1;
+  private static MemberVM server2;
+  private static MemberVM server3;
+
+  static final String HASH_KEY = "key";
+  static final String BASE_FIELD = "baseField_";
+  static final Map<String, String> INITIAL_DATA_SET = makeEntrySet(1000);
+
+  static int[] redisPorts;
+
+  @BeforeClass
+  public static void classSetup() throws Exception {
+    int locatorPort;
+    locatorProperties = new Properties();
+    locatorProperties.setProperty(MAX_WAIT_TIME_RECONNECT, "15000");
+
+    locator = clusterStartUp.startLocatorVM(0, locatorProperties);
+    locatorPort = locator.getPort();
+    redisPorts = AvailablePortHelper.getRandomAvailableTCPPorts(3);
+
+    // note: due to rules around member weighting in split-brain scenarios,
+    // vm1 (server1) should not be crashed or it will cause additional (unrelated) failures
+    String redisPort1 = redisPorts[0] + "";
+    server1 =
+        clusterStartUp.startServerVM(1,
+            x -> x.withProperty(REDIS_PORT, redisPort1)
+                .withProperty(REDIS_ENABLED, "true")
+                .withProperty(REDIS_BIND_ADDRESS, "localhost")
+                .withProperty(ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER,
+                    "org.apache.commons.lang3.tuple.**;org.apache.geode.**")
+                .withSystemProperty(ENABLE_REDIS_UNSUPPORTED_COMMANDS_PARAM, "true")
+                .withConnectionToLocator(locatorPort));
+
+    String redisPort2 = redisPorts[1] + "";
+    server2 = clusterStartUp.startServerVM(2,
+        x -> x.withProperty(REDIS_PORT, redisPort2)
+            .withProperty(REDIS_ENABLED, "true")
+            .withProperty(REDIS_BIND_ADDRESS, "localhost")
+            .withProperty(ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER,
+                "org.apache.commons.lang3.tuple.**;org.apache.geode.**")
+            .withSystemProperty(ENABLE_REDIS_UNSUPPORTED_COMMANDS_PARAM, "true")
+            .withConnectionToLocator(locatorPort));
+
+    String redisPort3 = redisPorts[2] + "";
+    server3 = clusterStartUp.startServerVM(3,
+        x -> x.withProperty(REDIS_PORT, redisPort3)
+            .withProperty(REDIS_ENABLED, "true")
+            .withProperty(REDIS_BIND_ADDRESS, "localhost")
+            .withProperty(ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER,
+                "org.apache.commons.lang3.tuple.**;org.apache.geode.**")
+            .withSystemProperty(ENABLE_REDIS_UNSUPPORTED_COMMANDS_PARAM, "true")
+            .withConnectionToLocator(locatorPort));
+
+    gfsh.connectAndVerify(locator);
+
+  }
+
+  @Before
+  public void testSetup() {
+    addIgnoredException(FunctionException.class);
+    String[] redisPortsAsStrings = new String[redisPorts.length];
+
+    for (int i = 0; i < redisPorts.length; i++) {
+      redisPortsAsStrings[i] = String.valueOf(redisPorts[i]);
+    }
+
+    DUnitSocketAddressResolver dnsResolver =
+        new DUnitSocketAddressResolver(redisPortsAsStrings);
+
+    ClientResources resources = ClientResources.builder()
+        .socketAddressResolver(dnsResolver)
+        .build();
+
+    redisClient = RedisClient.create(resources, "redis://localhost");
+    redisClient.setOptions(ClientOptions.builder()
+        .autoReconnect(true)
+        .build());
+
+    connection = redisClient.connect();
+    commands = connection.sync();
+    commands.hset(HASH_KEY, INITIAL_DATA_SET);
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    commands.quit();
+
+    server1.stop();
+    server2.stop();
+    server3.stop();
+  }
+
+  @Test
+  public void should_allow_hscan_iteration_to_complete_successfully_given_server_crashes_during_iteration()
+      throws ExecutionException, InterruptedException {
+
+    AtomicBoolean keepCrashingVMs = new AtomicBoolean(true);
+    AtomicInteger numberOfTimesServersCrashed = new AtomicInteger(0);
+
+    Future<Void> hScanFuture =
+        executor.runAsync(
+            () -> doHScanContinuallyAndAssertOnResults(keepCrashingVMs,
+                numberOfTimesServersCrashed));
+
+    Future<Void> crashingVmFuture =
+        executor.runAsync(
+            () -> crashAlternatingServers(keepCrashingVMs, numberOfTimesServersCrashed));
+
+
+    hScanFuture.get();
+    crashingVmFuture.get();
+  }
+
+
+  private static void doHScanContinuallyAndAssertOnResults(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+    int numberOfAssertionsCompleted = 0;
+
+    ScanCursor scanCursor = new ScanCursor("0", false);
+    List<String> allEntries = new ArrayList<>();
+    MapScanCursor<String, String> result;
+
+    while (numberOfAssertionsCompleted < 3 || numberOfTimesServersCrashed.get() < 3) {
+
+      allEntries.clear();
+      scanCursor.setCursor("0");
+      scanCursor.setFinished(false);
+
+      try {
+        do {
+          result = commands.hscan(HASH_KEY, scanCursor);
+          scanCursor.setCursor(result.getCursor());
+          Map<String, String> resultEntries = result.getMap();
+
+          resultEntries
+              .entrySet()
+              .forEach(
+                  entry -> allEntries.add(entry.getKey()));
+
+        } while (!result.isFinished());
+
+        assertThat(allEntries).containsAll(INITIAL_DATA_SET.keySet());
+        numberOfAssertionsCompleted++;
+
+      } catch (RedisCommandExecutionException ignore) {
+      } catch (RedisException ex) {
+        if (ex.getMessage().contains("Connection reset by peer")) { // ignore error
+        } else {
+          throw ex;
+        }
+      }
+    }
+    keepCrashingServers.set(false);
+  }
+
+
+
+  private void crashAlternatingServers(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+
+    int serverToCrashToggle = 3;
+    MemberVM server = null;

Review comment:
       ok - no longer initialized

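For context on the data being seeded: INITIAL_DATA_SET above is built by a makeEntrySet helper whose body falls outside the quoted hunk. A minimal, purely hypothetical sketch of such a seeding helper, assuming it just maps BASE_FIELD plus an index to some value (the actual field/value layout in the PR may differ):

    // Hypothetical reconstruction - the real makeEntrySet body is not shown in the hunk above.
    private static Map<String, String> makeEntrySet(int hashSize) {
      Map<String, String> dataSet = new HashMap<>();
      for (int i = 0; i < hashSize; i++) {
        // e.g. baseField_0 -> value_0, baseField_1 -> value_1, ...
        dataSet.put(BASE_FIELD + i, "value_" + i);
      }
      return dataSet;
    }
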
##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+  private void crashAlternatingServers(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+
+    int serverToCrashToggle = 3;
+    MemberVM server = null;
+    int redisPort;
+
+    do {
+      redisPort = redisPorts[serverToCrashToggle - 1];
+      if (serverToCrashToggle == 3) {
+        server = server3;
+      } else if (serverToCrashToggle == 2) {
+        server = server2;
+      } else {
+        // blow up if unexpected number
+        assertThat(true).isFalse()
+            .withFailMessage("something is wrong with this test setup");
+      }
+
+      clusterStartUp.crashVM(serverToCrashToggle);
+      server = startRedisVM(serverToCrashToggle, redisPort);

Review comment:
       yeah, I guess after some refactoring that section is no longer needed. Removed.

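The startRedisVM helper that restarts the crashed member is not part of the quoted hunk. A plausible sketch, assuming it simply re-runs the same startServerVM configuration used in classSetup for the given VM id and redis port (the name appears in the diff, but this body is an assumption, not the PR's actual implementation):

    // Assumed sketch - mirrors the startServerVM calls in classSetup above.
    private static MemberVM startRedisVM(int vmId, int redisPort) {
      String port = String.valueOf(redisPort);
      return clusterStartUp.startServerVM(vmId,
          x -> x.withProperty(REDIS_PORT, port)
              .withProperty(REDIS_ENABLED, "true")
              .withProperty(REDIS_BIND_ADDRESS, "localhost")
              .withProperty(ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER,
                  "org.apache.commons.lang3.tuple.**;org.apache.geode.**")
              .withSystemProperty(ENABLE_REDIS_UNSUPPORTED_COMMANDS_PARAM, "true")
              .withConnectionToLocator(locator.getPort()));
    }
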
##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+  private void crashAlternatingServers(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+
+    int serverToCrashToggle = 3;
+    MemberVM server = null;
+    int redisPort;
+
+    do {
+      redisPort = redisPorts[serverToCrashToggle - 1];
+      if (serverToCrashToggle == 3) {
+        server = server3;
+      } else if (serverToCrashToggle == 2) {
+        server = server2;
+      } else {
+        // blow up if unexpected number
+        assertThat(true).isFalse()

Review comment:
       cool - thanks - removing this section as per above.

##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+  private static void doHScanContinuallyAndAssertOnResults(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+    int numberOfAssertionsCompleted = 0;
+
+    ScanCursor scanCursor = new ScanCursor("0", false);
+    List<String> allEntries = new ArrayList<>();
+    MapScanCursor<String, String> result;
+
+    while (numberOfAssertionsCompleted < 3 || numberOfTimesServersCrashed.get() < 3) {

Review comment:
       numberOfTimesServersCrashed is an AtomicInteger incremented in another method (as servers crash), so that couldn't be checked in the same for loop. The idea is to make sure that at least 3 assertions have been completed and the servers have been crashed at least 3 times. I guess the number-of-assertions part could be in a for loop, but I think that might be more confusing. Let me know if you still think it should be changed. See the sketch below for the coordination pattern.

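Put differently, the two async tasks coordinate only through the shared atomics: the scan loop keeps doing full HSCAN passes until it has completed at least three assertions and has seen at least three crash/restart cycles, and only then flips the flag that stops the crasher. A stripped-down, illustrative-only sketch of that handshake (not the test's exact code):

    // Illustrative sketch of the coordination between the scanning task and the crashing task.
    AtomicBoolean keepCrashing = new AtomicBoolean(true);
    AtomicInteger crashCount = new AtomicInteger(0);

    Runnable scanner = () -> {
      int assertionsCompleted = 0;
      while (assertionsCompleted < 3 || crashCount.get() < 3) {
        // ... do a full HSCAN pass and assert on the results ...
        assertionsCompleted++;
      }
      keepCrashing.set(false); // signals the crasher to stop after its current cycle
    };

    Runnable crasher = () -> {
      do {
        // ... crash and restart one of the server VMs ...
        crashCount.incrementAndGet();
      } while (keepCrashing.get());
    };
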
##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+  private void crashAlternatingServers(AtomicBoolean keepCrashingServers,
+      AtomicInteger numberOfTimesServersCrashed) {
+
+    int serverToCrashToggle = 3;
+    MemberVM server = null;
+    int redisPort;
+
+    do {
+      redisPort = redisPorts[serverToCrashToggle - 1];
+      if (serverToCrashToggle == 3) {
+        server = server3;

Review comment:
       > yeah, you're right - looking again, this whole section is not really needed anymore. Thanks!

##########
File path: geode-redis/src/distributedTest/java/org/apache/geode/redis/internal/executor/hash/HScanDunitTest.java
##########
@@ -0,0 +1,299 @@
+  @Test
+  public void should_allow_hscan_iteration_to_complete_successfully_given_server_crashes_during_iteration()
+      throws ExecutionException, InterruptedException {
+
+    AtomicBoolean keepCrashingVMs = new AtomicBoolean(true);
+    AtomicInteger numberOfTimesServersCrashed = new AtomicInteger(0);
+
+    Future<Void> hScanFuture =

Review comment:
       I'll take a quick look and see if I find any. Let me know if you see any I miss.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


> finish implementation of Redis HScan Command
> --------------------------------------------
>
>                 Key: GEODE-8864
>                 URL: https://issues.apache.org/jira/browse/GEODE-8864
>             Project: Geode
>          Issue Type: New Feature
>          Components: redis
>            Reporter: John Hutchison
>            Priority: Major
>              Labels: pull-request-available
>




--
This message was sent by Atlassian Jira
(v8.3.4#803005)
