This is an automated email from the ASF dual-hosted git repository.

zhouxj pushed a commit to branch feature/GEODE-4788
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 378d97aed9f23c237b9616c78e17bdb1fea2c21f
Author: zhouxh <gz...@pivotal.io>
AuthorDate: Tue Mar 6 12:11:52 2018 -0800

    GEODE-4788: change the getSomeKeys test code back to ignoring exceptions.
---
 .../cache/PartitionedRegionGetSomeKeys.java        | 23 ++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionGetSomeKeys.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionGetSomeKeys.java
index bcce7df..a02dedb 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionGetSomeKeys.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionGetSomeKeys.java
@@ -20,17 +20,22 @@ import java.util.Collections;
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.partitioned.FetchKeysMessage;
 import org.apache.geode.internal.cache.partitioned.FetchKeysMessage.FetchKeysResponse;
 import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
 import org.apache.geode.internal.cache.tier.InterestType;
+import org.apache.geode.internal.logging.LogService;
 
 /**
  * Extracted from {@link PartitionedRegion}. This is a utility used by Hydra test code only.
  */
 public class PartitionedRegionGetSomeKeys {
 
+  private static final Logger logger = LogService.getLogger();
+
   /**
    * Test Method: Get a random set of keys from a randomly selected bucket using the provided
    * {@code Random} number generator.
@@ -42,7 +47,8 @@ public class PartitionedRegionGetSomeKeys {
     Set<Integer> bucketIdSet = partitionedRegion.getRegionAdvisor().getBucketSet();
 
     if (bucketIdSet != null && !bucketIdSet.isEmpty()) {
-      Integer[] bucketIds = bucketIdSet.toArray(new Integer[bucketIdSet.size()]);
+      Object[] bucketIds = bucketIdSet.toArray();
+      Integer bucketId = null;
       Set<?> someKeys;
 
       // Randomly pick a node to get some data from
@@ -53,7 +59,7 @@ public class PartitionedRegionGetSomeKeys {
            // The GSRandom.nextInt(int) may return a value that includes the maximum.
             whichBucket = bucketIds.length - 1;
           }
-          int bucketId = bucketIds[whichBucket];
+          bucketId = (Integer) bucketIds[whichBucket];
 
          InternalDistributedMember member = partitionedRegion.getNodeForBucketRead(bucketId);
           if (member != null) {
@@ -70,8 +76,17 @@ public class PartitionedRegionGetSomeKeys {
               return someKeys;
             }
           }
-        } catch (ForceReattemptException | PRLocallyDestroyedException e) {
-          throw new RuntimeException(e);
+        } catch (ForceReattemptException movinOn) {
+          partitionedRegion.checkReadiness();
+        } catch (ForceReattemptException movinOn) {
+          partitionedRegion.checkReadiness();
+          logger.debug(
+              "Test hook getSomeKeys caught a ForceReattemptException for bucketId={}{}{}. Moving on to another bucket",
+              partitionedRegion.getPRId(), partitionedRegion.BUCKET_ID_SEPARATOR, bucketId,
+              movinOn);
+          continue;
+        } catch (PRLocallyDestroyedException ignore) {
+          logger.debug("getSomeKeys: Encountered PRLocallyDestroyedException");
+          partitionedRegion.checkReadiness();
+          continue;
         }
       }
     }

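For context, the following is a minimal, self-contained sketch of the catch-and-continue pattern this patch restores: instead of wrapping a transient failure in a RuntimeException, the loop ignores it and tries another bucket. The names used here (IgnoreAndRetryExample, RetryableException, fetchKeys) are illustrative stand-ins, not the actual Geode API.

    import java.util.Collections;
    import java.util.Random;
    import java.util.Set;

    public class IgnoreAndRetryExample {

      /** Illustrative stand-in for a transient, retryable failure. */
      static class RetryableException extends Exception {
      }

      /** Pretends to fetch keys from one bucket; may fail transiently. */
      static Set<String> fetchKeys(int bucketId) throws RetryableException {
        if (bucketId % 2 == 0) {
          throw new RetryableException();
        }
        return Collections.singleton("key-" + bucketId);
      }

      /** Tries random buckets, ignoring transient failures instead of rethrowing them. */
      static Set<String> getSomeKeys(int bucketCount, Random rnd) {
        for (int attempt = 0; attempt < bucketCount; attempt++) {
          int bucketId = rnd.nextInt(bucketCount);
          try {
            Set<String> keys = fetchKeys(bucketId);
            if (!keys.isEmpty()) {
              return keys;
            }
          } catch (RetryableException ignored) {
            // Before the patch the exception was wrapped in a RuntimeException;
            // after it, the loop simply moves on to another bucket.
            continue;
          }
        }
        return Collections.emptySet();
      }

      public static void main(String[] args) {
        System.out.println(getSomeKeys(5, new Random()));
      }
    }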