pivotal-jbarrett commented on code in PR #7493: URL: https://github.com/apache/geode/pull/7493#discussion_r859797400
########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -533,57 +522,56 @@ public String description() { public static void executeFunction() { - Region region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + Region<Object, Object> region = cache.getRegion(PartitionedRegionName); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets * 10); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); try { - ResultCollector rc1 = + ResultCollector<?, ?> rc1 = dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId()); - HashMap resultMap = ((HashMap) rc1.getResult()); - assertEquals(3, resultMap.size()); + HashMap<?, ?> resultMap = ((HashMap<?, ?>) rc1.getResult()); + assertThat(resultMap).size().isEqualTo(3); Review Comment: Please use the AssertJ collection assertions. ```java assertThat(map).hasSize(3); ``` ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> 
function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); + logger.info("Result are SSSS : " + resultList); + assertThat(resultList.size()).isEqualTo(3); Review Comment: `hasSize()` ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -740,191 +712,163 @@ private static void serverMultiKeyExecutionOnASingleBucket(Boolean isByName) { } DistributedSystem.setThreadsSocketPolicy(false); for (String o : testKeysSet) { - try { - Set<String> singleKeySet = Collections.singleton(o); - Function function = new TestFunction(true, TEST_FUNCTION2); - FunctionService.registerFunction(function); - Execution dataSet = FunctionService.onRegion(region); - ResultCollector rc1 = execute(dataSet, singleKeySet, Boolean.TRUE, function, isByName); - List l = ((List) rc1.getResult()); - assertEquals(1, l.size()); - - ResultCollector rc2 = - execute(dataSet, singleKeySet, new HashSet<>(singleKeySet), function, isByName); - List l2 = ((List) rc2.getResult()); - - assertEquals(1, l2.size()); - List subList = (List) l2.iterator().next(); - assertEquals(1, subList.size()); - assertEquals(region.get(singleKeySet.iterator().next()), subList.iterator().next()); - } catch (Exception expected) { - logger.info("Exception : " + expected.getMessage()); - expected.printStackTrace(); - fail("Test failed after the put operation"); - } + Set<String> singleKeySet = Collections.singleton(o); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); + FunctionService.registerFunction(function); + Execution dataSet = FunctionService.onRegion(region); + ResultCollector<?, ?> rc1 = execute(dataSet, singleKeySet, Boolean.TRUE, function, isByName); + List<?> l = (List<?>) rc1.getResult(); + assertThat(l.size()).isEqualTo(1); + + ResultCollector<?, ?> rc2 = + execute(dataSet, singleKeySet, new HashSet<>(singleKeySet), function, isByName); + List<?> l2 = (List<?>) rc2.getResult(); + + assertThat(l2.size()).isEqualTo(1); + List<?> subList = (List<?>) l2.iterator().next(); + 
assertThat(subList.size()).isEqualTo(1); + assertThat(subList.iterator().next()).isEqualTo(region.get(singleKeySet.iterator().next())); Review Comment: `assertThat(subList).containsOnly(singleKeySet)`? ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - 
Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); + logger.info("Result are SSSS : " + resultList); + assertThat(resultList.size()).isEqualTo(3); + + for (Object result : resultList) { + assertThat(result).isEqualTo(Boolean.TRUE); + } + ResultCollector<?, ?> rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); + List<?> l2 = (List<?>) rc2.getResult(); + assertThat(l2.size()).isEqualTo(3); + HashSet<Integer> foundVals = new HashSet<>(); + for (Object value : l2) { + List<?> subL = (List<?>) value; + assertThat(subL.size()).isGreaterThan(0); + for (Object o : subL) { + assertThat(foundVals.add((Integer) o)).isTrue(); + } } + assertThat(foundVals).isEqualTo(origVals); } public static void getAll() { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final List<String> testKeysList = new ArrayList<>(); for (int i = (totalNumBuckets * 3); i > 0; i--) { testKeysList.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - try { - int j = 0; - Map<String, Integer> origVals = new HashMap<>(); - for (String key : testKeysList) { - Integer val = j++; - origVals.put(key, val); - region.put(key, val); - } - Map resultMap = region.getAll(testKeysList); - assertEquals(resultMap, origVals); - Wait.pause(2000); - Map secondResultMap = region.getAll(testKeysList); - assertEquals(secondResultMap, origVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); - + int j = 0; + Map<String, Integer> origVals = new HashMap<>(); + for (String key : testKeysList) { + Integer val = j++; + origVals.put(key, val); + region.put(key, val); } + Map<String, Integer> resultMap = region.getAll(testKeysList); + assertThat(resultMap).isEqualTo(origVals); Review Comment: I think what you really want is `containsOnly()`. The failure of `equalsTo` will just inform us that the two values are not equal but `containsOnly()` will break it down to the exact entries. 
########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + 
region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); + logger.info("Result are SSSS : " + resultList); + assertThat(resultList.size()).isEqualTo(3); + + for (Object result : resultList) { + assertThat(result).isEqualTo(Boolean.TRUE); + } + ResultCollector<?, ?> rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); + List<?> l2 = (List<?>) rc2.getResult(); + assertThat(l2.size()).isEqualTo(3); Review Comment: `hasSize()` ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, 
testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); Review Comment: Let's avoid adding more logging to tests. If it is worth logging it is worth asserting and you don't need both. ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, 
Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); + logger.info("Result are SSSS : " + resultList); + assertThat(resultList.size()).isEqualTo(3); + + for (Object result : resultList) { + assertThat(result).isEqualTo(Boolean.TRUE); + } + ResultCollector<?, ?> rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); + List<?> l2 = (List<?>) rc2.getResult(); + assertThat(l2.size()).isEqualTo(3); + HashSet<Integer> foundVals = new HashSet<>(); + for (Object value : l2) { + List<?> subL = (List<?>) value; + assertThat(subL.size()).isGreaterThan(0); Review Comment: `hasSizeGreaterThan()` ########## geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java: ########## @@ -596,139 +584,123 @@ private static void putOperation() { } private void createScenario() { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) { - ArrayList commonAttributes = + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout); } private void createScenarioForBucketFilter() { - ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", + ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion", new BucketFilterPRResolver(), 0, null); createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20); } private static void checkBucketsOnServer() { PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName); - HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally(); + HashMap<Integer, Integer> localBucket2RegionMap = + (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally(); logger.info( "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size()); - Set entrySet = localBucket2RegionMap.entrySet(); - assertNotNull(entrySet); + Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet(); + assertThat(entrySet).isNotNull(); } private static void serverAllKeyExecution(Boolean isByName) { Region<String, 
Integer> region = cache.getRegion(PartitionedRegionName); - assertNotNull(region); + assertThat(region).isNotNull(); final HashSet<String> testKeysSet = new HashSet<>(); for (int i = (totalNumBuckets / 2); i > 0; i--) { testKeysSet.add("execKey-" + i); } DistributedSystem.setThreadsSocketPolicy(false); - Function function = new TestFunction(true, TEST_FUNCTION2); + Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2); FunctionService.registerFunction(function); Execution dataSet = FunctionService.onRegion(region); - try { - int j = 0; - HashSet<Integer> origVals = new HashSet<>(); - for (String item : testKeysSet) { - Integer val = j++; - origVals.add(val); - region.put(item, val); - } - ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); - List resultList = (List) rc1.getResult(); - logger.info("Result size : " + resultList.size()); - logger.info("Result are SSSS : " + resultList); - assertEquals(3, resultList.size()); - - for (Object result : resultList) { - assertEquals(Boolean.TRUE, result); - } - ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); - List l2 = ((List) rc2.getResult()); - assertEquals(3, l2.size()); - HashSet<Integer> foundVals = new HashSet<>(); - for (Object value : l2) { - ArrayList subL = (ArrayList) (value); - assertTrue(subL.size() > 0); - for (Object o : subL) { - assertTrue(foundVals.add((Integer) o)); - } - } - assertEquals(origVals, foundVals); - - } catch (Exception e) { - Assert.fail("Test failed after the put operation", e); + int j = 0; + HashSet<Integer> origVals = new HashSet<>(); + for (String item : testKeysSet) { + Integer val = j++; + origVals.add(val); + region.put(item, val); + } + ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName); + List<?> resultList = (List<?>) rc1.getResult(); + logger.info("Result size : " + resultList.size()); + logger.info("Result are SSSS : " + resultList); + assertThat(resultList.size()).isEqualTo(3); + + for (Object result : resultList) { + assertThat(result).isEqualTo(Boolean.TRUE); + } + ResultCollector<?, ?> rc2 = executeOnAll(dataSet, testKeysSet, function, isByName); + List<?> l2 = (List<?>) rc2.getResult(); + assertThat(l2.size()).isEqualTo(3); + HashSet<Integer> foundVals = new HashSet<>(); + for (Object value : l2) { + List<?> subL = (List<?>) value; + assertThat(subL.size()).isGreaterThan(0); + for (Object o : subL) { + assertThat(foundVals.add((Integer) o)).isTrue(); + } } + assertThat(foundVals).isEqualTo(origVals); Review Comment: This code above looks like a form of AssertJ's collection `contains()` assertions. Perhaps we can replace it with one of those assertions. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: notifications-unsubscr...@geode.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org
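For reference, a combined, self-contained sketch of the collection assertions suggested in the review comments above (`hasSize()`, `hasSizeGreaterThan()`, `containsOnly()`, and a `contains`-family replacement for the `foundVals` accumulation loop). It assumes AssertJ 3.12+; `resultList`, `l2`, and `origVals` are hypothetical stand-ins, not the values actually produced in `serverAllKeyExecution()`:

```java
import static org.assertj.core.api.Assertions.assertThat;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class CollectionAssertionSketch {
  public static void main(String[] args) {
    // Hypothetical stand-ins for the test's resultList, l2 and origVals.
    List<Boolean> resultList = Arrays.asList(true, true, true);
    List<List<Integer>> l2 =
        Arrays.asList(Arrays.asList(0, 1), Arrays.asList(2), Arrays.asList(3, 4));
    Set<Integer> origVals = new HashSet<>(Arrays.asList(0, 1, 2, 3, 4));

    // hasSize()/hasSizeGreaterThan() instead of asserting on size(), and
    // containsOnly(TRUE) instead of looping over every result.
    assertThat(resultList).hasSize(3).containsOnly(Boolean.TRUE);
    for (List<Integer> subL : l2) {
      assertThat(subL).hasSizeGreaterThan(0);
    }

    // The foundVals accumulation loop collapses into a flatten plus one
    // collection assertion that also checks for duplicates.
    List<Integer> allValues =
        l2.stream().flatMap(List::stream).collect(Collectors.toList());
    assertThat(allValues)
        .doesNotHaveDuplicates()
        .containsExactlyInAnyOrderElementsOf(origVals);
  }
}
```

For the single-bucket case, something like `assertThat(subList).containsExactly(region.get(key))` would cover both the size check and the element check in one assertion, if that matches the intent of the `containsOnly(singleKeySet)` suggestion.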