This is an automated email from the ASF dual-hosted git repository.
klund pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git
The following commit(s) were added to refs/heads/develop by this push:
new 6b2d45a GEODE-4970: Overhaul and split up
PartitionedRegionQueryDUnitTest (#1718)
6b2d45a is described below
commit 6b2d45ac34663b1eeda6c2e921b73d1a114b2a9a
Author: Kirk Lund <[email protected]>
AuthorDate: Wed Apr 4 10:29:37 2018 -0700
GEODE-4970: Overhaul and split up PartitionedRegionQueryDUnitTest (#1718)
Rename:
* PartitionedRegionQueryDUnitTest -> PRQueryDistributedTest
Extract the following tests from PRQueryDistributedTest:
* PRQueryWithIndexAndPdxDistributedTest
* PRQueryWithIndexDistributedTest
* PRQueryWithOrderByDistributedTest
* PRQueryWithPdxDuringRebalanceRegressionTest
* PRWithIndexAfterRebalanceRegressionTest
Introduce and use:
* InternalClientCache interface
* ClientCacheRule
---
.../cache/client/internal/InternalClientCache.java | 6 +
.../internal/cache/PRQueryDistributedTest.java | 550 ++++++++
.../PRQueryWithIndexAndPdxDistributedTest.java | 241 ++++
.../cache/PRQueryWithIndexDistributedTest.java | 172 +++
.../cache/PRQueryWithOrderByDistributedTest.java | 217 ++++
...RQueryWithPdxDuringRebalanceRegressionTest.java | 163 +++
.../PRWithIndexAfterRebalanceRegressionTest.java | 180 +++
.../cache/PartitionedRegionQueryDUnitTest.java | 1359 --------------------
.../geode/test/dunit/rules/ClientCacheRule.java | 225 ++++
9 files changed, 1754 insertions(+), 1359 deletions(-)
diff --git
a/geode-core/src/main/java/org/apache/geode/cache/client/internal/InternalClientCache.java
b/geode-core/src/main/java/org/apache/geode/cache/client/internal/InternalClientCache.java
index ce5e9ba..fa345b9 100644
---
a/geode-core/src/main/java/org/apache/geode/cache/client/internal/InternalClientCache.java
+++
b/geode-core/src/main/java/org/apache/geode/cache/client/internal/InternalClientCache.java
@@ -21,6 +21,8 @@ import org.apache.geode.cache.TimeoutException;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolFactory;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.internal.cache.CachePerfStats;
public interface InternalClientCache extends ClientCache {
@@ -34,4 +36,8 @@ public interface InternalClientCache extends ClientCache {
throws RegionExistsException, TimeoutException;
Pool getDefaultPool();
+
+ InternalDistributedSystem getInternalDistributedSystem();
+
+ CachePerfStats getCachePerfStats();
}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryDistributedTest.java
new file mode 100644
index 0000000..75e8377
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryDistributedTest.java
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.query.IndexExistsException;
+import org.apache.geode.cache.query.IndexNameConflictException;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.RegionNotFoundException;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.query.internal.DefaultQuery;
+import org.apache.geode.cache.query.internal.DefaultQuery.TestHook;
+import org.apache.geode.cache.query.internal.index.IndexManager;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Distributed tests for querying a PartitionedRegion.
+ */
+@Category(DistributedTest.class)
+@SuppressWarnings("serial")
+public class PRQueryDistributedTest implements Serializable {
+
+ private String regionName;
+
+ private VM vm0;
+ private VM vm1;
+ private VM vm2;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Rule
+ public SerializableTestName testName = new SerializableTestName();
+
+ @Before
+ public void setUp() {
+ vm0 = getVM(0);
+ vm1 = getVM(1);
+ vm2 = getVM(2);
+
+ regionName = getClass().getSimpleName() + "_" + testName.getMethodName();
+ }
+
+ @After
+ public void tearDown() {
+ invokeInEveryVM(() -> {
+ DefaultQuery.QUERY_VERBOSE = false;
+ DefaultQuery.testHook = null;
+
+ // tearDown for IndexManager.setIndexBufferTime
+ IndexManager.SAFE_QUERY_TIME.set(0);
+ });
+ }
+
+ @Test
+ public void testReevaluationDueToUpdateInProgress() throws Exception {
+ vm0.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createPartitionedRegion());
+
+ vm0.invoke(() -> createIndex("compactRangeIndex", "entry.value",
+ SEPARATOR + regionName + ".entrySet entry"));
+
+ vm0.invoke("putting data", () -> {
+ Region region = cacheRule.getCache().getRegion(regionName);
+ for (int i = 0; i < 100; i++) {
+ region.put(i, new TestObject(i));
+ }
+ });
+
+ vm0.invoke("resetting sqt", () -> {
+ IndexManager.setIndexBufferTime(Long.MAX_VALUE, Long.MAX_VALUE);
+ });
+ vm1.invoke("resetting sqt", () -> {
+ IndexManager.setIndexBufferTime(Long.MAX_VALUE, Long.MAX_VALUE);
+ });
+
+ vm0.invoke("query", () -> {
+ cacheRule.getCache().getQueryService()
+ .newQuery("SELECT DISTINCT entry.key, entry.value FROM " + SEPARATOR
+ regionName
+ + ".entrySet entry WHERE entry.value.score >= 5 AND
entry.value.score <= 10 ORDER BY value asc")
+ .execute();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is used and query verbose
is set to true on
+ * local and remote servers
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOnBothServers() throws
Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("<trace> select * from " + SEPARATOR + regionName + " r
where r > 0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isTrue();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is used and query verbose
is set to true on
+ * local but false on remote servers All flags should be true still as the
{@code <trace>} is
+ * OR'd with query verbose flag
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOnLocalServerOnly()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("<trace> select * from " + SEPARATOR + regionName + " r
where r > 0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isTrue();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is NOT used and query
verbose is set to true
+ * on local but false on remote. The remote should not send a pr query trace
info back because
+ * trace was not requested.
+ */
+ @Test
+ public void
testPartitionRegionDebugMessageQueryTraceOffLocalServerVerboseOn() throws
Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("select * from " + SEPARATOR + regionName + " r where r >
0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isNull();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isTrue();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isNull();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isNull();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is NOT used and query
verbose is set to false
+ * on local but true on remote servers We don't output the string or do
anything on the local
+ * side, but we still pull off the object due to the remote server
generating and sending it over.
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOffRemoteServerOnly()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("select * from " + SEPARATOR + regionName + " r where r >
0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isNull();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isNull();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is used and query verbose
is set to false on
+ * local and remote servers trace is OR'd so the entire trace process should
be invoked.
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOnRemoteServerOnly()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("<trace> select * from " + SEPARATOR + regionName + " r
where r > 0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isTrue();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is NOT used and query
verbose is set to false
+ * on local but true remote servers The local node still receives the pr
trace info from the
+ * remote node due to query verbose being on however nothing is used on the
local side.
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOffRemoteServerOn()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = true;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("select * from " + SEPARATOR + regionName + " r where r >
0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isNull();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isNull();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ /**
+ * Tests trace for PR queries when {@code <trace>} is NOT used and query
verbose is set to false
+ * on local and remote servers None of our hooks should have triggered.
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOffQueryVerboseOff()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("select * from " + SEPARATOR + regionName + " r where r >
0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isNull();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isNull();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isNull();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isNull();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isNull();
+ });
+ }
+
+ /**
+ * Test trace for PR queries when {@code <trace>} is used and query verbose
is set to false on
+ * local and remote servers. All hooks should have triggered due to trace
being used.
+ */
+ @Test
+ public void testPartitionRegionDebugMessageQueryTraceOnQueryVerboseOff()
throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+ vm1.invoke(() -> createBuckets());
+
+ vm1.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+ vm2.invoke(() -> {
+ DefaultQuery.testHook = new PRQueryTraceTestHook();
+ DefaultQuery.QUERY_VERBOSE = false;
+ });
+
+ vm1.invoke(() -> {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("<trace> select * from " + SEPARATOR + regionName + " r
where r > 0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ });
+
+ vm1.invoke(() -> {
+ PRQueryTraceTestHook server1TestHookInVM1 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server1TestHookInVM1.getHooks().get("Pull off PR Query Trace
Info")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
String")).isTrue();
+ assertThat(server1TestHookInVM1.getHooks().get("Create PR Query Trace
Info From Local Node"))
+ .isTrue();
+ });
+ vm2.invoke(() -> {
+ PRQueryTraceTestHook server2TestHookInVM2 = (PRQueryTraceTestHook)
DefaultQuery.testHook;
+ assertThat(server2TestHookInVM2.getHooks().get("Populating Trace Info
for Remote Query"))
+ .isTrue();
+ assertThat(server2TestHookInVM2.getHooks().get("Create PR Query Trace
Info for Remote Query"))
+ .isTrue();
+ });
+ }
+
+ private void createPartitionedRegion() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf = new
PartitionAttributesFactory().setTotalNumBuckets(10);
+ cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION)
+ .setPartitionAttributes(paf.create()).create(regionName);
+ }
+
+ private void createAccessor() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf =
+ new
PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0);
+ cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION_PROXY)
+ .setPartitionAttributes(paf.create()).create(regionName);
+ }
+
+ private void createBuckets() {
+ Region region = cacheRule.getCache().getRegion(regionName);
+ for (int i = 0; i < 10; i++) {
+ region.put(i, i);
+ }
+ }
+
+ private void createIndex(String indexName, String indexedExpression, String
regionPath)
+ throws IndexNameConflictException, IndexExistsException,
RegionNotFoundException {
+ cacheRule.getCache().getQueryService().createIndex(indexName,
indexedExpression, regionPath);
+ }
+
+ private static class TestObject implements DataSerializable, Comparable {
+
+ private Double score;
+
+ public TestObject() {
+ // nothing
+ }
+
+ public TestObject(double score) {
+ this.score = score;
+ }
+
+ @Override
+ public int compareTo(Object o) {
+ if (o instanceof TestObject) {
+ return score.compareTo(((TestObject) o).score);
+ }
+ return 1;
+ }
+
+ @Override
+ public void toData(DataOutput out) throws IOException {
+ out.writeDouble(score);
+ }
+
+ @Override
+ public void fromData(DataInput in) throws IOException,
ClassNotFoundException {
+ score = in.readDouble();
+ }
+ }
+
+ private static class PRQueryTraceTestHook implements TestHook, Serializable {
+
+ private final Map<String, Boolean> hooks = new HashMap<>();
+
+ Map<String, Boolean> getHooks() {
+ return hooks;
+ }
+
+ @Override
+ public void doTestHook(int spot) {
+ // nothing
+ }
+
+ @Override
+ public void doTestHook(String spot) {
+ hooks.put(spot, Boolean.TRUE);
+ }
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexAndPdxDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexAndPdxDistributedTest.java
new file mode 100644
index 0000000..0497857
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexAndPdxDistributedTest.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.function.Consumer;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.query.Index;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.query.Struct;
+import org.apache.geode.pdx.PdxReader;
+import org.apache.geode.pdx.PdxSerializable;
+import org.apache.geode.pdx.PdxWriter;
+import org.apache.geode.test.dunit.SerializableRunnableIF;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+
+/**
+ * Extracted from {@link PRQueryDistributedTest}.
+ */
+@Category(DistributedTest.class)
+@RunWith(JUnitParamsRunner.class)
+@SuppressWarnings("serial")
+public class PRQueryWithIndexAndPdxDistributedTest implements Serializable {
+
+ private static final String REGION_NAME =
+ PRQueryWithIndexAndPdxDistributedTest.class.getSimpleName();
+
+ private VM vm0;
+ private VM vm1;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Before
+ public void setUp() {
+ vm0 = getVM(0);
+ vm1 = getVM(1);
+
+ vm0.invoke(() -> cacheRule.createCache());
+ vm1.invoke(() -> cacheRule.createCache());
+ }
+
+ @Test
+ @Parameters({"HASH_INDEX", "RANGE_INDEX", "RANGE_INDEX_WITH_PDX"})
+ @TestCaseName("{method}({params})")
+ public void createIndexDoesNotDeserializePdxObjects(IndexCreation
indexCreation) {
+ // the 3 parameters from IndexCreation enum:
+ SerializableRunnableIF createIndex = () -> {
+ indexCreation.createIndex(cacheRule.getCache());
+ };
+ PdxAssetFactory valueSupplier = indexCreation.valueSupplier();
+ String queryString = indexCreation.queryString();
+
+ // the test:
+ vm0.invoke(() -> {
+ PartitionAttributesFactory paf = new
PartitionAttributesFactory().setTotalNumBuckets(10);
+ cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION)
+ .setPartitionAttributes(paf.create()).create(REGION_NAME);
+ });
+ vm1.invoke(() -> {
+ PartitionAttributesFactory paf = new
PartitionAttributesFactory().setTotalNumBuckets(10);
+ cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION)
+ .setPartitionAttributes(paf.create()).create(REGION_NAME);
+ });
+
+ // Do Puts. These objects cannot be deserialized because they throw an
exception
+ vm0.invoke(() -> {
+ Region region = cacheRule.getCache().getRegion(REGION_NAME);
+ region.put(0, new PdxNotDeserializableAsset(0, "B"));
+ region.put(10, new PdxNotDeserializableAsset(1, "B"));
+ region.put(1, new PdxNotDeserializableAsset(1, "B"));
+ IntStream.range(11, 100).forEach(i -> region.put(i,
valueSupplier.getAsset(i)));
+ });
+
+ // If this tries to deserialize the assets, it will fail
+ vm0.invoke(createIndex);
+
+ vm0.invoke(() -> {
+ QueryService queryService = cacheRule.getCache().getQueryService();
+ Query query = queryService.newQuery(queryString); // enum
+ SelectResults<Struct> results = (SelectResults) query.execute();
+
+ assertThat(results).hasSize(3);
+
+ Index index =
queryService.getIndex(cacheRule.getCache().getRegion(REGION_NAME),
+ "ContractDocumentIndex");
+
+ assertThat(index.getStatistics().getTotalUses()).isEqualTo(1);
+ });
+ }
+
+ private enum IndexCreation {
+ HASH_INDEX((cache) -> {
+ try {
+ cache.getQueryService().createHashIndex("ContractDocumentIndex",
"document",
+ Region.SEPARATOR + REGION_NAME);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }, "select assetId, document from " + Region.SEPARATOR + REGION_NAME
+ + " where document='B' limit 1000",
+ i -> new PdxNotDeserializableAsset(i, Integer.toString(i))),
+
+ RANGE_INDEX((cache) -> {
+ try {
+ cache.getQueryService().createIndex("ContractDocumentIndex", "ref",
+ Region.SEPARATOR + REGION_NAME + " r, r.references ref");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }, "select r.assetId, r.document from " + Region.SEPARATOR + REGION_NAME
+ + " r, r.references ref where ref='B_2' limit 1000",
+ i -> new PdxNotDeserializableAsset(i, Integer.toString(i))),
+
+ RANGE_INDEX_WITH_PDX((cache) -> {
+ try {
+ cache.getQueryService().createIndex("ContractDocumentIndex", "ref",
+ Region.SEPARATOR + REGION_NAME + " r, r.references ref");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }, "select r from " + Region.SEPARATOR + REGION_NAME
+ + " r, r.references ref where ref='B_2' limit 1000",
+ i -> new PdxAsset(i, Integer.toString(i)));
+
+ private final Consumer<Cache> strategy;
+ private final String queryString;
+ private final PdxAssetFactory valueSupplier;
+
+ IndexCreation(Consumer<Cache> strategy, String queryString,
PdxAssetFactory valueSupplier) {
+ this.strategy = strategy;
+ this.queryString = queryString;
+ this.valueSupplier = valueSupplier;
+ }
+
+ void createIndex(Cache cache) {
+ strategy.accept(cache);
+ }
+
+ String queryString() {
+ return queryString;
+ }
+
+ PdxAssetFactory valueSupplier() {
+ return valueSupplier;
+ }
+ }
+
+ private interface PdxAssetFactory extends Serializable {
+ PdxAsset getAsset(int i);
+ }
+
+ private static class PdxNotDeserializableAsset extends PdxAsset {
+
+ public PdxNotDeserializableAsset() {
+ throw new RuntimeException("Preventing Deserialization of Asset");
+ }
+
+ PdxNotDeserializableAsset(final int assetId, final String document) {
+ super(assetId, document);
+ }
+
+ @Override
+ public void fromData(final PdxReader reader) {
+ throw new RuntimeException("Not allowing us to deserialize one of
these");
+ }
+ }
+
+ private static class PdxAsset implements PdxSerializable {
+
+ private int assetId;
+ private String document;
+ private Collection<String> references = new ArrayList<>();
+
+ public PdxAsset() {
+ // nothing
+ }
+
+ PdxAsset(final int assetId, final String document) {
+ this.assetId = assetId;
+ this.document = document;
+ references.add(document + "_1");
+ references.add(document + "_2");
+ references.add(document + "_3");
+ }
+
+ @Override
+ public void toData(final PdxWriter writer) {
+ writer.writeString("document", document);
+ writer.writeInt("assetId", assetId);
+ writer.writeObject("references", references);
+ }
+
+ @Override
+ public void fromData(final PdxReader reader) {
+ document = reader.readString("document");
+ assetId = reader.readInt("assetId");
+ references = (Collection<String>) reader.readObject("references");
+ }
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexDistributedTest.java
new file mode 100644
index 0000000..ecc8797
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithIndexDistributedTest.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.test.dunit.DUnitEnv.get;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.stream.IntStream;
+
+import junitparams.JUnitParamsRunner;
+import junitparams.Parameters;
+import junitparams.naming.TestCaseName;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.InternalGemFireException;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.cache.query.IndexInvalidException;
+import org.apache.geode.cache.query.internal.index.AbstractIndex;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Extracted from {@link PRQueryDistributedTest}.
+ */
+@Category(DistributedTest.class)
+@RunWith(JUnitParamsRunner.class)
+@SuppressWarnings("serial")
+public class PRQueryWithIndexDistributedTest implements Serializable {
+
+ private String regionName;
+
+ private VM vm0;
+ private VM vm1;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Rule
+ public SerializableTestName testName = new SerializableTestName();
+
+ @Before
+ public void setUp() {
+ vm0 = getVM(0);
+ vm1 = getVM(1);
+
+ regionName = getClass().getSimpleName() + "_" + testName.getMethodName();
+ }
+
+  /**
+   * Creates the same partitioned region on two members, loads values that throw during
+   * deserialization in the chosen VM, and verifies that hash index creation fails with the
+   * expected exception while leaving no populated index behind on the remote member.
+   */
+  @Test
+  @Parameters({"OnLocalThrowsIndexInvalidException", "OnRemoteThrowsInternalGemFireException"})
+  @TestCaseName("{method}({params})")
+  public void failureToCreateIndex(VmThrows whichVmAndException) {
+    VM failingVm = getVM(whichVmAndException.vmIndex());
+    Class<? extends Exception> expectedException = whichVmAndException.exceptionClass();
+
+    // identical region setup on both members
+    for (VM vm : new VM[] {vm0, vm1}) {
+      vm.invoke(() -> {
+        cacheRule.createCache();
+        PartitionAttributesFactory paf = new PartitionAttributesFactory().setTotalNumBuckets(10);
+        cacheRule.getCache().createRegionFactory(RegionShortcut.PARTITION)
+            .setPartitionAttributes(paf.create()).create(regionName);
+      });
+    }
+
+    vm0.invoke(() -> {
+      Region region = cacheRule.getCache().getRegion(regionName);
+      for (int key = 1; key < 10; key++) {
+        region.put(key, new NotDeserializableAsset(failingVm.getId()));
+      }
+    });
+
+    // index creation must deserialize the values, which fails in the targeted VM
+    vm0.invoke(() -> {
+      assertThatThrownBy(() -> cacheRule.getCache().getQueryService()
+          .createHashIndex("ContractDocumentIndex", "document", SEPARATOR + regionName))
+              .isInstanceOf(expectedException);
+    });
+
+    // the remote member must not be left with a populated, partially-built index
+    vm1.invoke(() -> {
+      Region region = cacheRule.getCache().getRegion(regionName);
+      AbstractIndex index = (AbstractIndex) cacheRule.getCache().getQueryService()
+          .getIndex(region, "ContractDocumentIndex");
+      if (index != null) {
+        assertThat(index.isPopulated()).isFalse();
+      }
+    });
+  }
+
+  // Pairs a target VM index with the exception type expected when hash index creation
+  // fails in that VM: a local deserialization failure surfaces as IndexInvalidException,
+  // a remote one as InternalGemFireException.
+ private enum VmThrows {
+ OnLocalThrowsIndexInvalidException(0, IndexInvalidException.class),
+ OnRemoteThrowsInternalGemFireException(1, InternalGemFireException.class);
+
+ private final int vmIndex;
+ private final Class<? extends Exception> exceptionClass;
+
+ VmThrows(int vmIndex, Class<? extends Exception> exceptionClass) {
+ this.vmIndex = vmIndex;
+ this.exceptionClass = exceptionClass;
+ }
+
+  // index of the VM in which deserialization should fail
+ int vmIndex() {
+ return vmIndex;
+ }
+
+  // exception type the index-creating member is expected to observe
+ Class<? extends Exception> exceptionClass() {
+ return exceptionClass;
+ }
+ }
+
+  /**
+   * Value whose deserialization fails on exactly one member: {@code fromData} throws when it
+   * runs in the VM whose pid was captured at construction time.
+   */
+  @SuppressWarnings("unused")
+  private static class NotDeserializableAsset implements DataSerializable {
+
+    private int disallowedPid;
+
+    public NotDeserializableAsset() {
+      // required by DataSerializable
+    }
+
+    public NotDeserializableAsset(final int disallowedPid) {
+      this.disallowedPid = disallowedPid;
+    }
+
+    @Override
+    public void toData(final DataOutput out) throws IOException {
+      out.writeInt(disallowedPid);
+    }
+
+    @Override
+    public void fromData(final DataInput in) throws IOException, ClassNotFoundException {
+      disallowedPid = in.readInt();
+      // fail only in the targeted VM; every other member deserializes normally
+      if (disallowedPid == get().getPid()) {
+        throw new IOException("Cannot deserialize");
+      }
+    }
+  }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithOrderByDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithOrderByDistributedTest.java
new file mode 100644
index 0000000..66ca965
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithOrderByDistributedTest.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.test.dunit.NetworkUtils.getServerHostName;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Date;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.client.ClientCacheFactory;
+import org.apache.geode.cache.client.ClientRegionShortcut;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.pdx.JSONFormatter;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.ClientCacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Extracted from {@link PRQueryDistributedTest}.
+ */
+@Category(DistributedTest.class)
+@SuppressWarnings("serial")
+public class PRQueryWithOrderByDistributedTest implements Serializable {
+
+ private String regionName;
+ private String[] queries;
+
+ private VM server1;
+ private VM server2;
+ private VM client;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Rule
+ public ClientCacheRule clientCacheRule = new ClientCacheRule();
+
+ @Rule
+ public SerializableTestName testName = new SerializableTestName();
+
+ @Before
+ public void setUp() {
+ server1 = getVM(0);
+ server2 = getVM(1);
+ client = getVM(2);
+
+ regionName = getClass().getSimpleName() + "_" + testName.getMethodName();
+
+ queries = new String[] {
+ "select distinct * from " + SEPARATOR + regionName + " order by
\"date\"",
+ "select distinct \"date\" from " + SEPARATOR + regionName + " order by
\"date\"",
+ "select distinct * from " + SEPARATOR + regionName + " order by
\"time\"",
+ "select distinct \"time\" from " + SEPARATOR + regionName + " order by
\"time\"",
+ "select distinct * from " + SEPARATOR + regionName + " order by
\"timestamp\"",
+ "select distinct \"timestamp\" from " + SEPARATOR + regionName + "
order by \"timestamp\"",
+ "select distinct \"date\" from " + SEPARATOR + regionName + " order by
\"date\".\"score\"",
+ "select distinct * from " + SEPARATOR + regionName + " order by
nested.\"date\"",
+ "select distinct * from " + SEPARATOR + regionName + " order by
nested.\"date\".nonKeyword",
+ "select distinct * from " + SEPARATOR + regionName + " order by
nested.\"date\".\"date\"",
+ "select distinct * from " + SEPARATOR + regionName
+ + " order by nested.\"date\".\"date\".score"};
+
+ String jsonCustomer = "{" + "\"firstName\": \"John\"," + "\"lastName\":
\"Smith\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new Date() + "\"," + "
\"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\"" + "}";
+
+ String jsonCustomer1 = "{" + "\"firstName\": \"John1\"," + "\"lastName\":
\"Smith1\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new Date() + "\"," + "
\"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\"" + "}";
+
+ String jsonCustomer2 = "{" + "\"firstName\": \"John2\"," + "\"lastName\":
\"Smith2\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new Date() + "\"," + "
\"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\"" + "}";
+
+ String jsonCustomer3 = "{" + "\"firstName\": \"John3\"," + "\"lastName\":
\"Smith3\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) + "\"," +
" \"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\"" + "}";
+
+ String jsonCustomer4 = "{" + "\"firstName\": \"John4\"," + "\"lastName\":
\"Smith4\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) + "\"," +
" \"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\","
+ + " \"nested\":" + " \"" + new NestedKeywordObject(1) + "\"" + "}";
+
+ String jsonCustomer5 = "{" + "\"firstName\": \"John5\"," + "\"lastName\":
\"Smith5\","
+ + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) + "\"," +
" \"time\":" + " \""
+ + new Time(1000) + "\"," + " \"timestamp\":" + " \"" + new
Timestamp(1000) + "\","
+ + " \"nested\":" + " \""
+ + new NestedKeywordObject(new NestedKeywordObject(new TestObject(1)))
+ "\"" + "}";
+
+ int portForServer1 = server1.invoke("Create Server1", () -> {
+ cacheRule.createCache();
+ Region region =
cacheRule.getCache().createRegionFactory(PARTITION).create(regionName);
+
+ region.put("jsondoc", JSONFormatter.fromJSON(jsonCustomer));
+ region.put("jsondoc1", JSONFormatter.fromJSON(jsonCustomer1));
+ region.put("jsondoc2", JSONFormatter.fromJSON(jsonCustomer2));
+ region.put("jsondoc3", JSONFormatter.fromJSON(jsonCustomer3));
+ region.put("jsondoc4", JSONFormatter.fromJSON(jsonCustomer4));
+ region.put("jsondoc5", JSONFormatter.fromJSON(jsonCustomer5));
+
+ CacheServer server = cacheRule.getCache().addCacheServer();
+ server.setPort(0);
+ server.start();
+ return server.getPort();
+ });
+
+ int portForServer2 = server2.invoke("Create Server2", () -> {
+ cacheRule.createCache();
+ cacheRule.getCache().createRegionFactory(PARTITION).create(regionName);
+ CacheServer server = cacheRule.getCache().addCacheServer();
+ server.setPort(0);
+ server.start();
+ return server.getPort();
+ });
+
+ client.invoke("Create client", () -> {
+ ClientCacheFactory ccf = new ClientCacheFactory();
+ ccf.addPoolServer(getServerHostName(), portForServer1);
+ ccf.addPoolServer(getServerHostName(), portForServer2);
+
+ clientCacheRule.createClientCache(ccf);
+
clientCacheRule.getClientCache().createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
+ .create(regionName);
+ });
+ }
+
+ @Test
+ public void testOrderByOnPRWithReservedKeywords() throws Exception {
+ client.invoke("Execute queries on client", () -> {
+ QueryService queryService =
clientCacheRule.getClientCache().getQueryService();
+
+ for (String query : queries) {
+ SelectResults results = (SelectResults)
queryService.newQuery(query).execute();
+ assertThat(results.size())
+ .as("Size of result set should be greater than 0 for query: " +
query).isGreaterThan(0);
+ }
+ });
+ }
+
+  /**
+   * Serializable value ordered by its {@code score}.
+   *
+   * <p>
+   * NOTE(review): {@code compareTo} returns 1 for any non-TestObject argument instead of
+   * throwing ClassCastException, and equals/hashCode are not overridden, so the ordering is
+   * not consistent with equals — acceptable for this test's ORDER BY use, but not a
+   * general-purpose Comparable.
+   */
+  private static class TestObject implements DataSerializable, Comparable {
+
+    private Double score;
+
+    public TestObject() {
+      // required by DataSerializable
+    }
+
+    public TestObject(double score) {
+      this.score = score;
+    }
+
+    @Override
+    public int compareTo(Object o) {
+      if (!(o instanceof TestObject)) {
+        return 1;
+      }
+      return score.compareTo(((TestObject) o).score);
+    }
+
+    @Override
+    public void toData(DataOutput out) throws IOException {
+      out.writeDouble(score);
+    }
+
+    @Override
+    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+      score = in.readDouble();
+    }
+  }
+
+  // Holder whose only field is named after the OQL reserved word "date"; used to verify
+  // that ORDER BY can address reserved-word fields on nested objects.
+ @SuppressWarnings("unused")
+ private static class NestedKeywordObject implements Serializable {
+
+ private final Object date;
+
+ NestedKeywordObject(Object object) {
+ date = object;
+ }
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithPdxDuringRebalanceRegressionTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithPdxDuringRebalanceRegressionTest.java
new file mode 100644
index 0000000..885fc4b
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRQueryWithPdxDuringRebalanceRegressionTest.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PROXY;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.control.RebalanceOperation;
+import org.apache.geode.cache.control.RebalanceResults;
+import org.apache.geode.cache.query.FunctionDomainException;
+import org.apache.geode.cache.query.NameResolutionException;
+import org.apache.geode.cache.query.Query;
+import org.apache.geode.cache.query.QueryInvocationTargetException;
+import org.apache.geode.cache.query.SelectResults;
+import org.apache.geode.cache.query.TypeMismatchException;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.internal.cache.partitioned.QueryMessage;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.dunit.rules.SharedErrorCollector;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Extracted from {@link PRQueryDistributedTest}.
+ *
+ * <p>
+ * TRAC #43102: hang while executing query with pdx objects
+ */
+@Category(DistributedTest.class)
+@SuppressWarnings("serial")
+public class PRQueryWithPdxDuringRebalanceRegressionTest implements
Serializable {
+
+ private static final AtomicReference<RebalanceResults> REBALANCE_RESULTS =
+ new AtomicReference<>();
+
+ private String regionName;
+
+ private VM vm0;
+ private VM vm1;
+ private VM vm2;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Rule
+ public SerializableTestName testName = new SerializableTestName();
+
+ @Rule
+ public SharedErrorCollector errorCollector = new SharedErrorCollector();
+
+ @Before
+ public void setUp() {
+ vm0 = getVM(0);
+ vm1 = getVM(1);
+ vm2 = getVM(2);
+
+ regionName = getClass().getSimpleName() + "_" + testName.getMethodName();
+ }
+
+  /**
+   * 1. Buckets are created on several nodes <br>
+   * 2. A query is started <br>
+   * 3. While the query is executing, several buckets are moved.
+   */
+  @Test
+  public void testRebalanceDuringQueryEvaluation() throws Exception {
+    vm0.invoke(() -> createAccessor());
+    vm1.invoke(() -> createPartitionedRegion());
+    vm1.invoke(() -> createBuckets());
+    vm2.invoke(() -> createPartitionedRegion());
+
+    // Add a listener that will trigger a rebalance as soon as the query arrives on this node.
+    vm1.invoke("add listener", () -> {
+      DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
+
+        @Override
+        public void beforeProcessMessage(ClusterDistributionManager dm,
+            DistributionMessage message) {
+          if (message instanceof QueryMessage) {
+            RebalanceOperation rebalance =
+                cacheRule.getCache().getResourceManager().createRebalanceFactory().start();
+            // block until the rebalance finishes so buckets move mid-query
+            try {
+              REBALANCE_RESULTS.compareAndSet(null, rebalance.getResults());
+            } catch (CancellationException e) {
+              errorCollector.addError(e);
+            } catch (InterruptedException e) {
+              // restore the interrupt status instead of swallowing it
+              Thread.currentThread().interrupt();
+              errorCollector.addError(e);
+            }
+          }
+        }
+      });
+    });
+
+    vm0.invoke(() -> executeQuery());
+
+    vm1.invoke("check rebalance happened", () -> {
+      assertThat(REBALANCE_RESULTS.get()).isNotNull();
+      assertThat(REBALANCE_RESULTS.get().getTotalBucketTransfersCompleted()).isEqualTo(5);
+    });
+  }
+
+ private void createAccessor() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf =
+ new
PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0);
+
cacheRule.getCache().createRegionFactory(PARTITION_PROXY).setPartitionAttributes(paf.create())
+ .create(regionName);
+ }
+
+ private void createPartitionedRegion() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf = new
PartitionAttributesFactory().setTotalNumBuckets(10);
+
cacheRule.getCache().createRegionFactory(PARTITION).setPartitionAttributes(paf.create())
+ .create(regionName);
+ }
+
+ private void createBuckets() {
+ Region region = cacheRule.getCache().getRegion(regionName);
+ for (int i = 0; i < 10; i++) {
+ region.put(i, i);
+ }
+ }
+
+ private void executeQuery() throws NameResolutionException,
TypeMismatchException,
+ QueryInvocationTargetException, FunctionDomainException {
+ Query query = cacheRule.getCache().getQueryService()
+ .newQuery("select * from " + SEPARATOR + regionName + " r where r >
0");
+ SelectResults results = (SelectResults) query.execute();
+ assertThat(results.asSet()).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9);
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PRWithIndexAfterRebalanceRegressionTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRWithIndexAfterRebalanceRegressionTest.java
new file mode 100644
index 0000000..cc752cf
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/PRWithIndexAfterRebalanceRegressionTest.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.apache.geode.cache.RegionShortcut.PARTITION;
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PROXY;
+import static org.apache.geode.test.dunit.VM.getVM;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.query.Index;
+import org.apache.geode.cache.query.IndexExistsException;
+import org.apache.geode.cache.query.IndexNameConflictException;
+import org.apache.geode.cache.query.QueryService;
+import org.apache.geode.cache.query.RegionNotFoundException;
+import org.apache.geode.cache.query.internal.index.PartitionedIndex;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.CacheRule;
+import org.apache.geode.test.dunit.rules.DistributedTestRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Extracted from {@link PRQueryDistributedTest}.
+ *
+ * <p>
+ * TRAC #50749: RegionDestroyedException when running a OQL inside function
for a colocated region
+ */
+@Category(DistributedTest.class)
+@SuppressWarnings("serial")
+public class PRWithIndexAfterRebalanceRegressionTest implements Serializable {
+
+ public static final String INDEX_NAME = "prIndex";
+
+ private String regionName;
+
+ private VM vm0;
+ private VM vm1;
+ private VM vm2;
+ private VM vm3;
+
+ @ClassRule
+ public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+
+ @Rule
+ public CacheRule cacheRule = new CacheRule();
+
+ @Rule
+ public SerializableTestName testName = new SerializableTestName();
+
+ @Before
+ public void setUp() {
+ vm0 = getVM(0);
+ vm1 = getVM(1);
+ vm2 = getVM(2);
+ vm3 = getVM(3);
+
+ regionName = getClass().getSimpleName() + "_" + testName.getMethodName();
+ }
+
+ /**
+ * 1. Indexes and Buckets are created on several nodes <br>
+ * 2. Buckets are moved <br>
+ * 3. Check to make sure we don't have lingering bucket indexes with bucket
regions already
+ * destroyed
+ */
+ @Test
+ public void testRebalanceWithIndex() throws Exception {
+ vm0.invoke(() -> createAccessor());
+ vm1.invoke(() -> createPartitionedRegion());
+ vm2.invoke(() -> createPartitionedRegion());
+
+ vm1.invoke(() -> createIndex(INDEX_NAME, "r.score", SEPARATOR + regionName
+ " r"));
+
+ // Do Puts
+ vm1.invoke("putting data", () -> {
+ Region region = cacheRule.getCache().getRegion(regionName);
+ for (int i = 0; i < 2000; i++) {
+ region.put(i, new TestObject(i));
+ }
+ });
+
+ vm3.invoke(() -> createPartitionedRegion());
+
+ // Rebalance
+ vm1.invoke("rebalance", () -> {
+
cacheRule.getCache().getResourceManager().createRebalanceFactory().start().getResults();
+ });
+
+ vm1.invoke(() -> checkForLingeringBucketIndexes(INDEX_NAME));
+ vm2.invoke(() -> checkForLingeringBucketIndexes(INDEX_NAME));
+ }
+
+ private void createAccessor() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf =
+ new
PartitionAttributesFactory().setTotalNumBuckets(10).setLocalMaxMemory(0);
+
cacheRule.getCache().createRegionFactory(PARTITION_PROXY).setPartitionAttributes(paf.create())
+ .create(regionName);
+ }
+
+ private void createPartitionedRegion() {
+ cacheRule.createCache();
+ PartitionAttributesFactory paf = new
PartitionAttributesFactory().setTotalNumBuckets(10);
+
cacheRule.getCache().createRegionFactory(PARTITION).setPartitionAttributes(paf.create())
+ .create(regionName);
+ }
+
+ private void createIndex(String indexName, String indexedExpression, String
regionPath)
+ throws IndexNameConflictException, IndexExistsException,
RegionNotFoundException {
+ cacheRule.getCache().getQueryService().createIndex(indexName,
indexedExpression, regionPath);
+ }
+
+  /**
+   * Asserts that after rebalancing, every bucket index of the partitioned index still points
+   * at a live bucket region (regression check for lingering indexes on destroyed buckets).
+   */
+  private void checkForLingeringBucketIndexes(String indexName) {
+    Region region = cacheRule.getCache().getRegion(regionName);
+    QueryService queryService = cacheRule.getCache().getQueryService();
+    PartitionedIndex index = (PartitionedIndex) queryService.getIndex(region, indexName);
+    // getBucketIndexes returns a raw List; cast per element instead of an unchecked List cast
+    for (Object bucketIndex : index.getBucketIndexes()) {
+      assertThat(((Index) bucketIndex).getRegion().isDestroyed()).isFalse();
+    }
+  }
+
+  /**
+   * Serializable value ordered by its {@code score}, stored in the region before rebalancing.
+   *
+   * <p>
+   * NOTE(review): {@code compareTo} returns 1 for any non-TestObject argument instead of
+   * throwing ClassCastException, and equals/hashCode are not overridden — fine for this
+   * test, but not a general-purpose Comparable.
+   */
+  private static class TestObject implements DataSerializable, Comparable {
+
+    private Double score;
+
+    public TestObject() {
+      // required by DataSerializable
+    }
+
+    public TestObject(double score) {
+      this.score = score;
+    }
+
+    @Override
+    public int compareTo(Object o) {
+      if (!(o instanceof TestObject)) {
+        return 1;
+      }
+      return score.compareTo(((TestObject) o).score);
+    }
+
+    @Override
+    public void toData(DataOutput out) throws IOException {
+      out.writeDouble(score);
+    }
+
+    @Override
+    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+      score = in.readDouble();
+    }
+  }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionQueryDUnitTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionQueryDUnitTest.java
deleted file mode 100644
index 07aa23a..0000000
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionQueryDUnitTest.java
+++ /dev/null
@@ -1,1359 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
- * agreements. See the NOTICE file distributed with this work for additional
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
- * or implied. See the License for the specific language governing permissions
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.IntStream;
-
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.DataSerializable;
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.PartitionAttributesFactory;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionShortcut;
-import org.apache.geode.cache.client.ClientCache;
-import org.apache.geode.cache.client.ClientCacheFactory;
-import org.apache.geode.cache.client.ClientRegionShortcut;
-import org.apache.geode.cache.control.RebalanceOperation;
-import org.apache.geode.cache.control.RebalanceResults;
-import org.apache.geode.cache.query.FunctionDomainException;
-import org.apache.geode.cache.query.Index;
-import org.apache.geode.cache.query.IndexExistsException;
-import org.apache.geode.cache.query.IndexNameConflictException;
-import org.apache.geode.cache.query.NameResolutionException;
-import org.apache.geode.cache.query.Query;
-import org.apache.geode.cache.query.QueryInvocationTargetException;
-import org.apache.geode.cache.query.QueryService;
-import org.apache.geode.cache.query.RegionNotFoundException;
-import org.apache.geode.cache.query.SelectResults;
-import org.apache.geode.cache.query.Struct;
-import org.apache.geode.cache.query.TypeMismatchException;
-import org.apache.geode.cache.query.internal.DefaultQuery;
-import org.apache.geode.cache.query.internal.index.AbstractIndex;
-import org.apache.geode.cache.query.internal.index.IndexManager;
-import org.apache.geode.cache.query.internal.index.PartitionedIndex;
-import org.apache.geode.cache.server.CacheServer;
-import org.apache.geode.cache30.CacheSerializableRunnable;
-import org.apache.geode.distributed.internal.ClusterDistributionManager;
-import org.apache.geode.distributed.internal.DistributionMessage;
-import org.apache.geode.distributed.internal.DistributionMessageObserver;
-import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.internal.cache.partitioned.QueryMessage;
-import org.apache.geode.pdx.JSONFormatter;
-import org.apache.geode.pdx.PdxReader;
-import org.apache.geode.pdx.PdxSerializable;
-import org.apache.geode.pdx.PdxWriter;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.DUnitEnv;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.NetworkUtils;
-import org.apache.geode.test.dunit.SerializableCallable;
-import org.apache.geode.test.dunit.SerializableRunnable;
-import org.apache.geode.test.dunit.SerializableRunnableIF;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-@Category(DistributedTest.class)
-@Ignore("GEODE-4970: CI failure:
PartitionedRegionQueryDUnitTest.tesRangeIndexWithPdxObjects may hang")
-public class PartitionedRegionQueryDUnitTest extends JUnit4CacheTestCase {
-
- private static final AtomicReference<RebalanceResults> rebalanceResults =
- new AtomicReference<RebalanceResults>();
-
- @Test
- public void testReevaluationDueToUpdateInProgress() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- // VM vm2 = host.getVM(2);
-
- createPR(vm0);
- createPR(vm1);
- createIndex(vm0, "compactRangeIndex", "entry.value", "/region.entrySet
entry");
-
- // Do Puts
- vm0.invoke(new SerializableRunnable("putting data") {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- for (int i = 0; i < 100; i++) {
- region.put(i, new TestObject(i));
- }
- }
- });
-
- vm0.invoke(new SerializableRunnable("resetting sqt") {
- public void run() {
- IndexManager.setIndexBufferTime(Long.MAX_VALUE, Long.MAX_VALUE);
- }
- });
-
- vm1.invoke(new SerializableRunnable("resetting sqt") {
- public void run() {
- IndexManager.setIndexBufferTime(Long.MAX_VALUE, Long.MAX_VALUE);
- }
- });
-
- vm0.invoke(new SerializableRunnable("query") {
- public void run() {
- try {
- QueryService qs = getCache().getQueryService();
- qs.newQuery(
- "SELECT DISTINCT entry.key, entry.value FROM /region.entrySet
entry WHERE entry.value.score >= 5 AND entry.value.score <= 10 ORDER BY value
asc")
- .execute();
- } catch (QueryInvocationTargetException e) {
- e.printStackTrace();
- fail(e.toString());
- } catch (NameResolutionException e) {
- fail(e.toString());
-
- } catch (TypeMismatchException e) {
- fail(e.toString());
-
- } catch (FunctionDomainException e) {
- fail(e.toString());
-
- }
-
- }
- });
- }
-
- @Test
- public void testHashIndexDoesNotDeserializePdxObjects() {
- SerializableRunnableIF createIndex = () -> {
- Cache cache = getCache();
- cache.getQueryService().createHashIndex("ContractDocumentIndex",
"document", "/region");
- };
- String queryString = "select assetId,document from /region where
document='B' limit 1000";
-
- PdxAssetFactory value = i -> new PdxNotDeserializableAsset(i,
Integer.toString(i));
- createIndexDoesNotDerializePdxObjects(createIndex, queryString, value);
- }
-
- @Test
- public void tesRangeIndexDoesNotDeserializePdxObjects() {
- SerializableRunnableIF createIndex = () -> {
- Cache cache = getCache();
- cache.getQueryService().createIndex("ContractDocumentIndex", "ref",
- "/region r, r.references ref");
- };
- String queryString =
- "select r.assetId,r.document from /region r, r.references ref where
ref='B_2' limit 1000";
- PdxAssetFactory value = i -> new PdxNotDeserializableAsset(i,
Integer.toString(i));
- createIndexDoesNotDerializePdxObjects(createIndex, queryString, value);
- }
-
- @Test
- public void tesRangeIndexWithPdxObjects() {
- SerializableRunnableIF createIndex = () -> {
- Cache cache = getCache();
- cache.getQueryService().createIndex("ContractDocumentIndex", "ref",
- "/region r, r.references ref");
- };
- String queryString = "select r from /region r, r.references ref where
ref='B_2' limit 1000";
-
- PdxAssetFactory value = i -> new PdxAsset(i, Integer.toString(i));
- createIndexDoesNotDerializePdxObjects(createIndex, queryString, value);
- }
-
- private void createIndexDoesNotDerializePdxObjects(final
SerializableRunnableIF createIndex,
- final String queryString, PdxAssetFactory valueSupplier) {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- SerializableRunnableIF createPR = () -> {
- Cache cache = getCache();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(10);
-
cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create())
- .create("region");
- };
- vm0.invoke(createPR);
- vm1.invoke(createPR);
-
- // Do Puts. These objects can't be deserialized because they throw
- // and exception from the constructor
- vm0.invoke(() -> {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- region.put(0, new PdxNotDeserializableAsset(0, "B"));
- region.put(10, new PdxNotDeserializableAsset(1, "B"));
- region.put(1, new PdxNotDeserializableAsset(1, "B"));
- IntStream.range(11, 100).forEach(i -> region.put(i,
valueSupplier.getAsset(i)));
- });
-
- // If this tries to deserialize the assets, it will fail
- vm0.invoke(createIndex);
-
- vm0.invoke(() -> {
- QueryService qs = getCache().getQueryService();
- SelectResults<Struct> results = (SelectResults)
qs.newQuery(queryString).execute();
-
- assertEquals(3, results.size());
- final Index index = qs.getIndex(getCache().getRegion("region"),
"ContractDocumentIndex");
- assertEquals(1, index.getStatistics().getTotalUses());
- });
- }
-
- @Test
- public void testFailureToCreateIndexOnLocalNodeThrowsException() {
- VM vmToFailCreationOn = Host.getHost(0).getVM(0);
- failToCreateIndexOnNode(vmToFailCreationOn);
- }
-
- @Test
- public void testFailureToCreateIndexOnRemoteNodeThrowsException() {
- VM vmToFailCreationOn = Host.getHost(0).getVM(1);
- failToCreateIndexOnNode(vmToFailCreationOn);
- }
-
- private void failToCreateIndexOnNode(final VM vmToFailCreationOn) {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
-
- SerializableRunnableIF createPR = () -> {
- Cache cache = getCache();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(10);
-
cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create())
- .create("region");
- };
- vm0.invoke(createPR);
- vm1.invoke(createPR);
-
- vm0.invoke(() -> {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- IntStream.range(1, 10)
- .forEach(i -> region.put(i, new
NotDeserializableAsset(vmToFailCreationOn.getId())));
- });
-
- vm0.invoke(() -> {
- Cache cache = getCache();
- try {
- cache.getQueryService().createHashIndex("ContractDocumentIndex",
"document", "/region");
- fail("Should have thrown an exception");
- } catch (Exception expected) {
- }
- });
-
- vm1.invoke(() -> {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- final AbstractIndex index =
- (AbstractIndex) cache.getQueryService().getIndex(region,
"ContractDocumentIndex");
- // either the index was not created locally or it was created but is
populated flag should not
- // be set.
- if (index != null) {
- assertFalse(index.isPopulated());
- }
- });
- }
-
- /**
- * Test of bug 43102. 1. Buckets are created on several nodes 2. A query is
started 3. While the
- * query is executing, several buckets are moved.
- */
- @Test
- public void testRebalanceDuringQueryEvaluation() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
-
- createAccessor(vm0);
-
- createPR(vm1);
-
- createBuckets(vm1);
-
- createPR(vm2);
-
- // Add a listener that will trigger a rebalance
- // as soon as the query arrives on this node.
- vm1.invoke(new SerializableRunnable("add listener") {
-
- public void run() {
- DistributionMessageObserver.setInstance(new
DistributionMessageObserver() {
-
- @Override
- public void beforeProcessMessage(ClusterDistributionManager dm,
- DistributionMessage message) {
- if (message instanceof QueryMessage) {
- RebalanceOperation rebalance =
-
getCache().getResourceManager().createRebalanceFactory().start();
- // wait for the rebalance
- try {
- rebalanceResults.compareAndSet(null, rebalance.getResults());
- } catch (CancellationException e) {
- // ignore
- } catch (InterruptedException e) {
- // ignore
- }
- }
- }
- });
-
- }
- });
-
- executeQuery(vm0);
-
- vm1.invoke(new SerializableRunnable("check rebalance happened") {
-
- public void run() {
- assertNotNull(rebalanceResults.get());
- assertEquals(5,
rebalanceResults.get().getTotalBucketTransfersCompleted());
- }
- });
- }
-
- /**
- * Test of bug 50749 1. Indexes and Buckets are created on several nodes 2.
Buckets are moved 3.
- * Check to make sure we don't have lingering bucket indexes with bucket
regions already destroyed
- */
- @Test
- public void testRebalanceWithIndex() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- createAccessor(vm0);
-
- createPR(vm1);
- createPR(vm2);
- createIndex(vm1, "prIndex", "r.score", "/region r");
-
- // Do Puts
- vm1.invoke(new SerializableRunnable("putting data") {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- for (int i = 0; i < 2000; i++) {
- region.put(i, new TestObject(i));
- }
- }
- });
-
- createPR(vm3);
-
- // Rebalance
- vm1.invoke(new SerializableRunnable("rebalance") {
- public void run() {
- RebalanceOperation rebalance =
- getCache().getResourceManager().createRebalanceFactory().start();
- // wait for the rebalance
- try {
- rebalance.getResults();
- } catch (CancellationException e) {
- // ignore
- } catch (InterruptedException e) {
- // ignore
- }
- }
- });
-
- checkForLingeringBucketIndexes(vm1, "prIndex");
- checkForLingeringBucketIndexes(vm2, "prIndex");
-
- closeCache(vm1, vm2, vm3, vm0);
- }
-
- /**
- * tests trace for pr queries when <trace> is used and query verbose is set
to true on local and
- * remote servers
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOnBothServers() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
-
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query =
- cache.getQueryService().newQuery("<trace> select * from /region
r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- // verify hooks
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is used and query verbose is set
to true on local but
- * false on remote servers All flags should be true still as the <trace> is
OR'd with query
- * verbose flag
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOnLocalServerOnly() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query =
- cache.getQueryService().newQuery("<trace> select * from /region
r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is NOT used and query verbose is
set to true on local
- * but false on remote The remote should not send a pr query trace info back
because trace was not
- * requested
- */
- @Test
- public void
testPartitionRegionDebugMessageQueryTraceOffLocalServerVerboseOn() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query = cache.getQueryService().newQuery("select * from
/region r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertNull(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertNull(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertNull(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is NOT used and query verbose is
set to false on local
- * but true on remote servers We don't output the string or do anything on
the local side, but we
- * still pull off the object due to the remote server generating and sending
it over
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOffRemoteServerOnly() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query = cache.getQueryService().newQuery("select * from
/region r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- // verify hooks
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is used and query verbose is set
to false on local and
- * remote servers trace is OR'd so the entire trace process should be invoked
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOnRemoteServerOnly() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query =
- cache.getQueryService().newQuery("<trace> select * from /region
r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- // verify hooks
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is NOT used and query verbose is
set to false on local
- * but true remote servers The local node still receives the pr trace info
from the remote node
- * due to query verbose being on however nothing is used on the local side
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOffRemoteServerOn() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = true;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query = cache.getQueryService().newQuery("select * from
/region r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- // verify hooks
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- /**
- * tests trace for pr queries when <trace> is NOT used and query verbose is
set to false on local
- * and remote servers None of our hooks should have triggered
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOffQueryVerboseOff() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query = cache.getQueryService().newQuery("select * from
/region r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- // verify hooks
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertNull(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertNull(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertNull(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertNull(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
- /**
- * tests trace for pr queries when <trace> is used and query verbose is set
to false on local and
- * remote servers All hooks should have triggered due to trace being used
- */
- @Test
- public void testPartitionRegionDebugMessageQueryTraceOnQueryVerboseOff() {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- createAccessor(vm0);
- createPR(vm1);
- createPR(vm2);
- createBuckets(vm1);
-
- final PRQueryTraceTestHook server1TestHook = new PRQueryTraceTestHook();
- final PRQueryTraceTestHook server2TestHook = new PRQueryTraceTestHook();
- try {
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server1TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.testHook = server2TestHook;
- DefaultQuery.QUERY_VERBOSE = false;
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query =
- cache.getQueryService().newQuery("<trace> select * from /region
r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4,
5, 6, 7, 8, 9})),
- results.asSet());
-
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
-
- vm1.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server1TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server1TestHook.getHooks().get("Pull off PR Query Trace
Info"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
String"));
- assertTrue(server1TestHook.getHooks().get("Create PR Query Trace
Info From Local Node"));
- }
- });
- vm2.invoke(new SerializableRunnable() {
- public void run() {
- PRQueryTraceTestHook server2TestHook = (PRQueryTraceTestHook)
DefaultQuery.testHook;
- assertTrue(server2TestHook.getHooks().get("Populating Trace Info for
Remote Query"));
- assertTrue(server2TestHook.getHooks().get("Create PR Query Trace
Info for Remote Query"));
- }
- });
- } finally {
- setQueryVerbose(false, vm1, vm2);
- }
- }
-
-
- @Test
- public void testOrderByOnPRWithReservedKeywords() throws Exception {
- final Host host = Host.getHost(0);
- final VM server1 = host.getVM(0);
- final VM server2 = host.getVM(1);
- final VM client = host.getVM(2);
- final String regionName = "region1";
-
- final String[] queries = {"select distinct * from /" + regionName + "
order by \"date\"",
- "select distinct \"date\" from /" + regionName + " order by \"date\"",
- "select distinct * from /" + regionName + " order by \"time\"",
- "select distinct \"time\" from /" + regionName + " order by \"time\"",
- "select distinct * from /" + regionName + " order by \"timestamp\"",
- "select distinct \"timestamp\" from /" + regionName + " order by
\"timestamp\"",
- "select distinct \"date\" from /" + regionName + " order by
\"date\".\"score\"",
- "select distinct * from /" + regionName + " order by nested.\"date\"",
- "select distinct * from /" + regionName + " order by
nested.\"date\".nonKeyword",
- "select distinct * from /" + regionName + " order by
nested.\"date\".\"date\"",
- "select distinct * from /" + regionName + " order by
nested.\"date\".\"date\".score",};
-
- // Start server1
- final Integer port1 = (Integer) server1.invoke(new
SerializableCallable("Create Server1") {
- @Override
- public Object call() throws Exception {
- String jsonCustomer = "{" + "\"firstName\": \"John\"," +
"\"lastName\": \"Smith\","
- + " \"age\": 25," + " \"date\":" + " \"" + new java.util.Date() +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"" + "}";
-
- String jsonCustomer1 = "{" + "\"firstName\": \"John1\"," +
"\"lastName\": \"Smith1\","
- + " \"age\": 25," + " \"date\":" + " \"" + new java.util.Date() +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"" + "}";
-
- String jsonCustomer2 = "{" + "\"firstName\": \"John2\"," +
"\"lastName\": \"Smith2\","
- + " \"age\": 25," + " \"date\":" + " \"" + new java.util.Date() +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"" + "}";
- String jsonCustomer3 = "{" + "\"firstName\": \"John3\"," +
"\"lastName\": \"Smith3\","
- + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"" + "}";
- String jsonCustomer4 = "{" + "\"firstName\": \"John4\"," +
"\"lastName\": \"Smith4\","
- + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"," + " \"nested\":" + " \""
- + new NestedKeywordObject(1) + "\"" + "}";
- String jsonCustomer5 = "{" + "\"firstName\": \"John5\"," +
"\"lastName\": \"Smith5\","
- + " \"age\": 25," + " \"date\":" + " \"" + new TestObject(1) +
"\"," + " \"time\":"
- + " \"" + new java.sql.Time(1000) + "\"," + " \"timestamp\":" + "
\""
- + new java.sql.Timestamp(1000) + "\"," + " \"nested\":" + " \""
- + new NestedKeywordObject(new NestedKeywordObject(new
TestObject(1))) + "\"" + "}";
-
- Region r1 =
getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
-
- r1.put("jsondoc", JSONFormatter.fromJSON(jsonCustomer));
- r1.put("jsondoc1", JSONFormatter.fromJSON(jsonCustomer1));
- r1.put("jsondoc2", JSONFormatter.fromJSON(jsonCustomer2));
- r1.put("jsondoc3", JSONFormatter.fromJSON(jsonCustomer3));
- r1.put("jsondoc4", JSONFormatter.fromJSON(jsonCustomer4));
- r1.put("jsondoc5", JSONFormatter.fromJSON(jsonCustomer5));
-
- CacheServer server = getCache().addCacheServer();
- int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
- server.setPort(port);
- server.start();
- return port;
- }
- });
-
- // Start server2
- final Integer port2 = (Integer) server2.invoke(new
SerializableCallable("Create Server2") {
- @Override
- public Object call() throws Exception {
- Region r1 =
getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
- CacheServer server = getCache().addCacheServer();
- int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
- server.setPort(port);
- server.start();
- return port;
- }
- });
-
- client.invoke(new SerializableCallable("Create client") {
- @Override
- public Object call() throws Exception {
- ClientCacheFactory cf = new ClientCacheFactory();
- cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()),
port1);
- cf.addPoolServer(NetworkUtils.getServerHostName(server2.getHost()),
port2);
- ClientCache cache = getClientCache(cf);
-
- Region region =
-
cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
- QueryService qs = null;
- SelectResults sr = null;
-
- try {
- qs = getCache().getQueryService();
- } catch (Exception e) {
- Assert.fail("Failed to get QueryService.", e);
- }
-
- for (int i = 0; i < queries.length; i++) {
- try {
- sr = (SelectResults) qs.newQuery(queries[i]).execute();
- assertTrue("Size of resultset should be greater than 0 for query:
" + queries[i],
- sr.size() > 0);
- } catch (Exception e) {
- Assert.fail("Failed executing query ", e);
- }
- }
- return null;
- }
- });
-
- this.closeClient(server1);
- this.closeClient(server2);
- this.closeClient(client);
-
- }
-
- /* Close Client */
- public void closeClient(VM client) {
- SerializableRunnable closeCache = new CacheSerializableRunnable("Close
Client") {
- public void run2() throws CacheException {
- LogWriterUtils.getLogWriter().info("### Close Client. ###");
- try {
- closeCache();
- disconnectFromDS();
- } catch (Exception ex) {
- LogWriterUtils.getLogWriter().info("### Failed to get close client.
###");
- }
- }
- };
-
- client.invoke(closeCache);
- }
-
- private void executeQuery(VM vm0) {
- vm0.invoke(new SerializableRunnable() {
-
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- Query query = cache.getQueryService().newQuery("select * from /region
r where r > 0");
- try {
- SelectResults results = (SelectResults) query.execute();
- assertEquals(new HashSet(Arrays.asList(new Integer[] {1, 2, 3, 4, 5,
6, 7, 8, 9})),
- results.asSet());
- } catch (Exception e) {
- Assert.fail("Bad query", e);
- }
- }
- });
- }
-
- private void checkForLingeringBucketIndexes(VM vm, final String indexName) {
- vm.invoke(new SerializableRunnable() {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- QueryService qs = cache.getQueryService();
- PartitionedIndex index = (PartitionedIndex) qs.getIndex(region,
indexName);
- Iterator iterator = index.getBucketIndexes().iterator();
- int numBucketIndexes = index.getBucketIndexes().size();
- while (iterator.hasNext()) {
- Index bucketIndex = (Index) iterator.next();
- assertFalse(((LocalRegion) bucketIndex.getRegion()).isDestroyed());
- }
- }
- });
- }
-
- private void createBuckets(VM vm) {
- vm.invoke(new SerializableRunnable("create accessor") {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion("region");
- for (int i = 0; i < 10; i++) {
- region.put(i, i);
- }
- }
- });
- }
-
- private void createPR(VM vm) {
- vm.invoke(new SerializableRunnable("create accessor") {
- public void run() {
- Cache cache = getCache();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(10);
-
cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create())
- .create("region");
- }
- });
- }
-
- private void createAccessor(VM vm) {
- vm.invoke(new SerializableRunnable("create accessor") {
-
- public void run() {
- Cache cache = getCache();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setTotalNumBuckets(10);
- paf.setLocalMaxMemory(0);
- cache.createRegionFactory(RegionShortcut.PARTITION_PROXY)
- .setPartitionAttributes(paf.create()).create("region");
- }
- });
- }
-
- private void createIndex(VM vm, final String indexName, final String
indexedExpression,
- final String regionPath) {
- vm.invoke(new SerializableRunnable("create index") {
- public void run() {
- try {
- Cache cache = getCache();
- cache.getQueryService().createIndex(indexName, indexedExpression,
regionPath);
- } catch (RegionNotFoundException e) {
- fail(e.toString());
- } catch (IndexExistsException e) {
- fail(e.toString());
- } catch (IndexNameConflictException e) {
- fail(e.toString());
- }
- }
- });
- }
-
- private void closeCache(VM... vms) {
- for (VM vm : vms) {
- vm.invoke(new SerializableRunnable() {
- public void run() {
- getCache().close();
- }
- });
- }
- }
-
- private void setQueryVerbose(final boolean value, VM... vms) {
- for (VM vm : vms) {
- vm.invoke(new SerializableRunnable() {
- public void run() {
- DefaultQuery.QUERY_VERBOSE = value;
- }
- });
- }
- }
-
-
- public static class TestObject implements DataSerializable, Comparable {
- @Override
- public int compareTo(Object o) {
- if (o instanceof TestObject) {
- return score.compareTo(((TestObject) o).score);
- }
- return 1;
- }
-
- public Double score;
-
- public TestObject() {}
-
- public TestObject(double score) {
- this.score = score;
- }
-
- @Override
- public void toData(DataOutput out) throws IOException {
- out.writeDouble(score);
- }
-
- @Override
- public void fromData(DataInput in) throws IOException,
ClassNotFoundException {
- score = in.readDouble();
- }
- }
-
- public interface PdxAssetFactory extends Serializable {
- PdxAsset getAsset(int i);
- }
-
- public static class PdxNotDeserializableAsset extends PdxAsset {
- public int assetId;
- public String document;
- public Collection<String> references = new ArrayList<String>();
-
- public PdxNotDeserializableAsset() {
- throw new RuntimeException("Preventing Deserialization of Asset");
- }
-
- public PdxNotDeserializableAsset(final int assetId, final String document)
{
- super(assetId, document);
- }
-
- @Override
- public void fromData(final PdxReader reader) {
- throw new RuntimeException("Not allowing us to deserialize one of
these");
- }
- }
-
- public static class PdxAsset implements PdxSerializable {
- public int assetId;
- public String document;
- public Collection<String> references = new ArrayList<String>();
-
- public PdxAsset() {
-
- }
-
- public PdxAsset(final int assetId, final String document) {
- this.assetId = assetId;
- this.document = document;
- references.add(document + "_1");
- references.add(document + "_2");
- references.add(document + "_3");
- }
-
- @Override
- public void toData(final PdxWriter writer) {
- writer.writeString("document", document);
- writer.writeInt("assetId", assetId);
- writer.writeObject("references", references);
- }
-
- @Override
- public void fromData(final PdxReader reader) {
- this.document = reader.readString("document");
- this.assetId = reader.readInt("assetId");
- this.references = (Collection<String>) reader.readObject("references");
- }
- }
-
- public static class NotDeserializableAsset implements DataSerializable {
- private int disallowedPid;
-
- public NotDeserializableAsset() {
-
- }
-
- public NotDeserializableAsset(final int disallowedPid) {
- this.disallowedPid = disallowedPid;
- }
-
- @Override
- public void toData(final DataOutput out) throws IOException {
- out.writeInt(disallowedPid);
-
- }
-
- @Override
- public void fromData(final DataInput in) throws IOException,
ClassNotFoundException {
- disallowedPid = in.readInt();
- if (disallowedPid == DUnitEnv.get().getPid()) {
- throw new IOException("Cannot deserialize");
- }
- }
- }
-
- public class NestedKeywordObject implements Serializable {
-
- public Object date;
- public Object nonKeyword;
-
- public NestedKeywordObject(Object object) {
- this.date = object;
- }
-
- public NestedKeywordObject(Object keywordObject, Object nonKeywordObject) {
- this.date = keywordObject;
- this.nonKeyword = nonKeywordObject;
- }
- }
-
- private class PRQueryTraceTestHook implements DefaultQuery.TestHook,
Serializable {
- private HashMap<String, Boolean> hooks = new HashMap<String, Boolean>();
-
- public HashMap<String, Boolean> getHooks() {
- return hooks;
- }
-
- @Override
- public void doTestHook(int spot) {
-
- }
-
- @Override
- public void doTestHook(String spot) {
- hooks.put(spot, Boolean.TRUE);
- }
- }
-}
diff --git
a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClientCacheRule.java
b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClientCacheRule.java
new file mode 100644
index 0000000..a97f302
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClientCacheRule.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.test.dunit.rules;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
+import static org.apache.geode.test.dunit.DistributedTestUtils.getLocators;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.geode.cache.client.ClientCacheFactory;
+import org.apache.geode.cache.client.internal.InternalClientCache;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.test.dunit.Disconnect;
+import org.apache.geode.test.dunit.VM;
+
+/**
+ * JUnit Rule that creates ClientCache instances in DistributedTest VMs without
+ * {@code CacheTestCase}.
+ *
+ * <p>
+ * {@code ClientCacheRule} follows the standard convention of using a {@code
Builder} for
+ * configuration as introduced in the JUnit {@code Timeout} rule.
+ *
+ * <p>
+ * {@code ClientCacheRule} can be used in DistributedTests as a {@code Rule}:
+ *
+ * <pre>
+ * {@literal @}ClassRule
+ * public static DistributedTestRule distributedTestRule = new
DistributedTestRule();
+ *
+ * {@literal @}Rule
+ * public ClientCacheRule clientCacheRule = new ClientCacheRule();
+ *
+ * {@literal @}Test
+ * public void hasClient() {
+ * vm0.invoke(() -> clientCacheRule.createClientCache());
+ * }
+ * </pre>
+ */
+@SuppressWarnings({"serial", "unused"})
+public class ClientCacheRule extends DistributedExternalResource {
+
+ private static volatile InternalClientCache clientCache;
+
+ private final boolean createClientCache;
+ private final boolean disconnectAfter;
+ private final List<VM> createClientCacheInVMs;
+ private final Properties config;
+ private final Properties systemProperties;
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public ClientCacheRule() {
+ this(new Builder());
+ }
+
+ ClientCacheRule(final Builder builder) {
+ createClientCache = builder.createClientCache;
+ disconnectAfter = builder.disconnectAfter;
+ createClientCacheInVMs = builder.createClientCacheInVMs;
+ config = builder.config;
+ systemProperties = builder.systemProperties;
+ }
+
+ @Override
+ protected void before() {
+ if (createClientCache) {
+ createClientCache(config, systemProperties);
+ }
+ for (VM vm : createClientCacheInVMs) {
+ vm.invoke(() -> createClientCache(config, systemProperties));
+ }
+ }
+
+ @Override
+ protected void after() {
+ closeAndNullClientCache();
+ invoker().invokeInEveryVMAndController(() -> closeAndNullClientCache());
+
+ if (disconnectAfter) {
+ Disconnect.disconnectAllFromDS();
+ }
+ }
+
+ public InternalClientCache getClientCache() {
+ return clientCache;
+ }
+
+ public InternalDistributedSystem getSystem() {
+ return clientCache.getInternalDistributedSystem();
+ }
+
+ public void createClientCache() {
+ clientCache = (InternalClientCache) new
ClientCacheFactory(config).create();
+ }
+
+ public void createClientCache(final ClientCacheFactory ClientCacheFactory) {
+ clientCache = (InternalClientCache) ClientCacheFactory.create();
+ }
+
+ public void createClientCache(final Properties config) {
+ clientCache = (InternalClientCache) new
ClientCacheFactory(config).create();
+ }
+
+ public void createClientCache(final Properties config, final Properties
systemProperties) {
+ System.getProperties().putAll(systemProperties);
+ clientCache = (InternalClientCache) new
ClientCacheFactory(config).create();
+ }
+
+ public InternalClientCache getOrCreateClientCache() {
+ if (clientCache == null) {
+ createClientCache();
+ assertThat(clientCache).isNotNull();
+ }
+ return clientCache;
+ }
+
+ private static void closeAndNullClientCache() {
+ closeClientCache();
+ nullClientCache();
+ }
+
+ private static void closeClientCache() {
+ try {
+ if (clientCache != null) {
+ clientCache.close();
+ }
+ } catch (Exception ignored) {
+ // ignored
+ }
+ }
+
+ private static void nullClientCache() {
+ clientCache = null;
+ }
+
+ /**
+ * Builds an instance of ClientCacheRule.
+ */
+ public static class Builder {
+
+ private boolean createClientCache;
+ private boolean disconnectAfter;
+ private List<VM> createClientCacheInVMs = new ArrayList<>();
+ private Properties config = new Properties();
+ private Properties systemProperties = new Properties();
+
+ public Builder() {
+ config.setProperty(LOCATORS, getLocators());
+ }
+
+ /**
+ * Create ClientCache in specified VM. Default is none.
+ */
+ public Builder createClientCacheIn(final VM vm) {
+ if (!createClientCacheInVMs.contains(vm)) {
+ createClientCacheInVMs.add(vm);
+ }
+ return this;
+ }
+
+ /**
+ * Create Cache in local JVM (controller). Default is false.
+ */
+ public Builder createClientCacheInLocal() {
+ createClientCache = true;
+ return this;
+ }
+
+ /**
+ * Disconnect from DistributedSystem in all VMs after each test.
ClientCache is always closed
+ * regardless. Default is false.
+ */
+ public Builder disconnectAfter() {
+ disconnectAfter = true;
+ return this;
+ }
+
+ public Builder replaceConfig(final Properties config) {
+ this.config = config;
+ return this;
+ }
+
+ public Builder addConfig(final String key, final String value) {
+ config.put(key, value);
+ return this;
+ }
+
+ public Builder addConfig(final Properties config) {
+ this.config.putAll(config);
+ return this;
+ }
+
+ public Builder addSystemProperty(final String key, final String value) {
+ systemProperties.put(key, value);
+ return this;
+ }
+
+ public Builder addSystemProperties(final Properties config) {
+ systemProperties.putAll(config);
+ return this;
+ }
+
+ public ClientCacheRule build() {
+ return new ClientCacheRule(this);
+ }
+ }
+}
--
To stop receiving notification emails like this one, please contact
[email protected].