http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java index e700fa7..69bebdf 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java @@ -36,10 +36,10 @@ import com.gemstone.gemfire.test.dunit.Assert; import com.gemstone.gemfire.test.dunit.AsyncInvocation; import com.gemstone.gemfire.test.dunit.Host; import com.gemstone.gemfire.test.dunit.Invoke; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; import com.gemstone.gemfire.test.dunit.SerializableCallable; import com.gemstone.gemfire.test.dunit.SerializableRunnable; -import com.gemstone.gemfire.test.dunit.Threads; +import com.gemstone.gemfire.test.dunit.ThreadUtils; import com.gemstone.gemfire.test.dunit.VM; /** @@ -90,7 +90,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends final int redundancy = 1; createPartitionRegionAsynch("testMetaDataCleanupOnSinglePRNodeFail_", startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnSinglePRNodeFail() - PartitionedRegion's created at all VM nodes"); @@ -100,7 +100,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends // disconnect vm0. 
DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, "disconnectMethod"); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "testMetaDataCleanupOnSinglePRNodeFail() - VM = " + dsMember + " disconnected from the distributed system "); @@ -108,7 +108,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends vmArr[1].invoke(validateNodeFailMetaDataCleanUp(dsMember)); vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember)); vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember)); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed node config metadata complete"); @@ -117,11 +117,11 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember)); vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember)); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed node bucket2Node Region metadata complete"); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnSinglePRNodeFail() Completed Successfuly .........."); } @@ -136,7 +136,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends Cache c = getCache(); Region rootReg = PartitionedRegionHelper.getPRRoot(c); // Region allPRs = PartitionedRegionHelper.getPRConfigRegion(rootReg, c); - rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(LogWriterSupport.getLogWriter())); + rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter())); } }; @@ -199,7 +199,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends final int redundancy = 1; createPartitionRegionAsynch("testMetaDataCleanupOnMultiplePRNodeFail_", startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnMultiplePRNodeFail() - PartitionedRegion's created at all VM nodes"); @@ -208,7 +208,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends // disconnect vm0 DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, "disconnectMethod"); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember + " disconnected from the distributed system "); @@ -231,7 +231,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends // disconnect vm1 DistributedMember dsMember2 = (DistributedMember)vmArr[1].invoke(this, "disconnectMethod"); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember2 + " disconnected from the distributed system "); @@ -254,18 +254,18 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember2)); vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember2)); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed nodes config metadata complete"); vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2)); vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2)); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( 
"testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed nodes bucket2Node Region metadata complete"); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testMetaDataCleanupOnMultiplePRNodeFail() Completed Successfuly .........."); } @@ -293,8 +293,8 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends assertEquals(2, cls.length); CertifiableTestCacheListener ctcl = (CertifiableTestCacheListener) cls[1]; - LogWriterSupport.getLogWriter().info("Listener update (" + ctcl.updates.size() + "): " + ctcl.updates) ; - LogWriterSupport.getLogWriter().info("Listener destroy: (" + ctcl.destroys.size() + "): " + ctcl.destroys) ; + LogWriterUtils.getLogWriter().info("Listener update (" + ctcl.updates.size() + "): " + ctcl.updates) ; + LogWriterUtils.getLogWriter().info("Listener destroy: (" + ctcl.destroys.size() + "): " + ctcl.destroys) ; Iterator itrator = rootReg.keySet().iterator(); for (Iterator itr = itrator; itr.hasNext();) { @@ -381,7 +381,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends DistributedMember dsMember = ((InternalDistributedSystem)getCache() .getDistributedSystem()).getDistributionManager().getId(); getCache().getDistributedSystem().disconnect(); - LogWriterSupport.getLogWriter().info("disconnectMethod() completed .."); + LogWriterUtils.getLogWriter().info("disconnectMethod() completed .."); return dsMember; } @@ -399,7 +399,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends redundancy, localMaxMemory, recoveryDelay)); } for (int count2 = 0; count2 < async.length; count2++) { - Threads.join(async[count2], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count2], 30 * 1000); } for (int count2 = 0; count2 < async.length; count2++) { @@ -449,7 +449,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends assertEquals(bucketOwners.size(), redundantCopies + 1); DistributedMember bucketOwner = (DistributedMember) bucketOwners.iterator().next(); assertNotNull(bucketOwner); - LogWriterSupport.getLogWriter().info("Selected distributed member " + bucketOwner + " to disconnect because it hosts bucketId " + bucketId); + LogWriterUtils.getLogWriter().info("Selected distributed member " + bucketOwner + " to disconnect because it hosts bucketId " + bucketId); return bucketOwner; } }); @@ -459,7 +459,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends Map stillHasDS = Invoke.invokeInEveryVM(new SerializableCallable("Disconnect provided bucketHost") { public Object call() throws Exception { if (getSystem().getDistributedMember().equals(bucketHost)) { - LogWriterSupport.getLogWriter().info("Disconnecting distributed member " + getSystem().getDistributedMember()); + LogWriterUtils.getLogWriter().info("Disconnecting distributed member " + getSystem().getDistributedMember()); disconnectFromDS(); return Boolean.FALSE; } @@ -522,7 +522,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends assertEquals(pr.getRedundantCopies() + 1, owners.size()); break; // retry loop } catch (ForceReattemptException retryIt) { - LogWriterSupport.getLogWriter().info("Need to retry validation for bucket in PR " + pr, retryIt); + LogWriterUtils.getLogWriter().info("Need to retry validation for bucket in PR " + pr, retryIt); } } while (true); // retry loop } // bucketId loop
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java index b5b090b..28e1bfb 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java @@ -34,7 +34,7 @@ import com.gemstone.gemfire.cache.util.ObjectSizer; import com.gemstone.gemfire.cache30.CacheSerializableRunnable; import com.gemstone.gemfire.internal.cache.lru.Sizeable; import com.gemstone.gemfire.test.dunit.Host; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; import com.gemstone.gemfire.test.dunit.SerializableRunnable; import com.gemstone.gemfire.test.dunit.VM; @@ -178,7 +178,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest extends i++; } assertEquals(1, pr.getDataStore().localBucket2RegionMap.size()); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "putObjectInPartitionRegion() - Put operation done successfully"); } else { @@ -191,7 +191,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest extends fail("Bucket gets created even if no memory is available"); } catch (PartitionedRegionStorageException e) { - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "putObjectInPartitionRegion()- got correct PartitionedRegionStorageException while creating bucket when no memory is available"); } http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java index 82aa308..22d1fd7 100644 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java @@ -23,8 +23,8 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable; import com.gemstone.gemfire.test.dunit.Assert; import com.gemstone.gemfire.test.dunit.AsyncInvocation; import com.gemstone.gemfire.test.dunit.Host; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; -import com.gemstone.gemfire.test.dunit.Threads; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; +import com.gemstone.gemfire.test.dunit.ThreadUtils; import com.gemstone.gemfire.test.dunit.VM; /** @@ -95,22 +95,22 @@ public class PartitionedRegionMultipleDUnitTest extends /** creationg and performing put(),get() operations on Partition Region */ createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionPutAndGet() - Partition Regions Successfully Created "); validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, 
startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionPutAndGet() - Partition Regions Successfully Validated "); putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionPutAndGet() - Put() Operation done Successfully in Partition Regions "); getInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionPutAndGet() - Partition Regions Successfully Validated "); } @@ -150,38 +150,38 @@ public class PartitionedRegionMultipleDUnitTest extends */ createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Partition Regions Successfully Created "); validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Partition Regions Successfully Validated "); putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Put() Operation done Successfully in Partition Regions "); destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Destroy(Key) Operation done Successfully in Partition Regions "); getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion, afterPutFlag); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Get() Operation after destoy keys done Successfully in Partition Regions "); putDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Put() Operation after destroy keys done Successfully in Partition Regions "); afterPutFlag = 1; getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion, afterPutFlag); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyKeys() - Get() Operation after Put() done Successfully in Partition Regions "); } @@ -212,22 +212,22 @@ public class PartitionedRegionMultipleDUnitTest extends /** creating Partition Regions and testing for the APIs contains() */ createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions Successfully Created "); validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions Successfully Validated "); putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + 
LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyAndContainsAPI() - Put() Operation done Successfully in Partition Regions "); destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion, endIndexForRegion); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyAndContainsAPI() - Destroy(Key) Operation done Successfully in Partition Regions "); async[0] = vm0.invokeAsync(validateContainsAPIForPartitionRegion( @@ -240,7 +240,7 @@ public class PartitionedRegionMultipleDUnitTest extends startIndexForRegion, endIndexForRegion)); for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 120 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 120 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -249,7 +249,7 @@ public class PartitionedRegionMultipleDUnitTest extends } } - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPartitionedRegionDestroyAndContainsAPI() - Validation of Contains APIs done Successfully in Partition Regions "); } @@ -300,7 +300,7 @@ public class PartitionedRegionMultipleDUnitTest extends startIndexForRegion, endIndexForRegion)); for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -336,7 +336,7 @@ public class PartitionedRegionMultipleDUnitTest extends /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -369,7 +369,7 @@ public class PartitionedRegionMultipleDUnitTest extends endIndexForRegion)); /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -408,7 +408,7 @@ public class PartitionedRegionMultipleDUnitTest extends /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -466,7 +466,7 @@ public class PartitionedRegionMultipleDUnitTest extends } } - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "validateContainsAPIForPartitionRegion() - Get() Validations done Successfully in Partition Region " + pr.getName()); @@ -481,7 +481,7 @@ public class PartitionedRegionMultipleDUnitTest extends } } - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "validateContainsAPIForPartitionRegion() - containsKey() Validations done Successfully in Partition Region " + pr.getName()); @@ -496,7 +496,7 @@ public class PartitionedRegionMultipleDUnitTest extends assertTrue(conKey); } } - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "validateContainsAPIForPartitionRegion() - containsValueForKey() Validations done Successfully in Partition Region " + pr.getName()); @@ -510,7 +510,7 
@@ public class PartitionedRegionMultipleDUnitTest extends assertTrue(conKey); } } - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "validateContainsAPIForPartitionRegion() - containsValue() Validations done Successfully in Partition Region " + pr.getName()); @@ -545,7 +545,7 @@ public class PartitionedRegionMultipleDUnitTest extends startIndexForRegion, endIndexForRegion, afterPutFlag)); /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); if (async[count].exceptionOccurred()) { Assert.fail("exception during " + count, async[count].getException()); } @@ -588,7 +588,7 @@ public class PartitionedRegionMultipleDUnitTest extends /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java index e7dc716..f35b39a 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java @@ -26,8 +26,8 @@ import com.gemstone.gemfire.cache30.*; import com.gemstone.gemfire.test.dunit.Assert; import com.gemstone.gemfire.test.dunit.AsyncInvocation; import com.gemstone.gemfire.test.dunit.Host; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; -import com.gemstone.gemfire.test.dunit.Threads; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; +import com.gemstone.gemfire.test.dunit.ThreadUtils; import com.gemstone.gemfire.test.dunit.VM; /** @@ -85,7 +85,7 @@ public class PartitionedRegionPRIDDUnitTest extends // Create 1/2 * MAX_REGIONS regions in VM 0,1,2 with scope D_ACK. createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, prPrefix); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPRIDGenerationInMultiplePartitionRegion() - Partition regions on 3 nodes successfully created"); @@ -101,7 +101,7 @@ public class PartitionedRegionPRIDDUnitTest extends // VM 3 contains regions from id MAX_REGIONS to 2*MAX_REGIONS only. 
createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion, localMaxMemory, pr2_redundancy, prPrefix); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter() .info( "testPRIDGenerationInMultiplePartitionRegion() - Partition regions on 4 nodes successfully created"); // validating PRID generation for multiple partition regions @@ -118,7 +118,7 @@ public class PartitionedRegionPRIDDUnitTest extends /** main thread is waiting for the other threads to complete */ for (int count = 0; count < AsyncInvocationArrSize; count++) { - Threads.join(async[count], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[count], 30 * 1000); } for (int count = 0; count < AsyncInvocationArrSize; count++) { @@ -208,10 +208,10 @@ public class PartitionedRegionPRIDDUnitTest extends if (prIdPRSet.size() != PartitionedRegion.prIdToPR.size()) fail("Duplicate PRID are generated in prIdToPR"); - LogWriterSupport.getLogWriter().info("Size of allPartition region : " + prIdSet.size()); - LogWriterSupport.getLogWriter() + LogWriterUtils.getLogWriter().info("Size of allPartition region : " + prIdSet.size()); + LogWriterUtils.getLogWriter() .info("Size of prIdToPR region : " + prIdPRSet.size()); - LogWriterSupport.getLogWriter().info("PRID generated successfully"); + LogWriterUtils.getLogWriter().info("PRID generated successfully"); } }; return validatePRID; @@ -235,7 +235,7 @@ public class PartitionedRegionPRIDDUnitTest extends numNodes++; } for (int i = 0; i < numNodes; i++) { - Threads.join(async[i], 30 * 1000, LogWriterSupport.getLogWriter()); + ThreadUtils.join(async[i], 30 * 1000); } for (int i = 0; i < numNodes; i++) { http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java index 415e709..42b34dd 100644 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java @@ -60,8 +60,8 @@ import com.gemstone.gemfire.internal.cache.partitioned.QueryMessage; import com.gemstone.gemfire.pdx.JSONFormatter; import com.gemstone.gemfire.test.dunit.Assert; import com.gemstone.gemfire.test.dunit.Host; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; -import com.gemstone.gemfire.test.dunit.NetworkSupport; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; +import com.gemstone.gemfire.test.dunit.NetworkUtils; import com.gemstone.gemfire.test.dunit.SerializableCallable; import com.gemstone.gemfire.test.dunit.SerializableRunnable; import com.gemstone.gemfire.test.dunit.VM; @@ -944,8 +944,8 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase { @Override public Object call() throws Exception { ClientCacheFactory cf = new ClientCacheFactory(); - cf.addPoolServer(NetworkSupport.getServerHostName(server1.getHost()), port1); - cf.addPoolServer(NetworkSupport.getServerHostName(server2.getHost()), port2); + cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1); + cf.addPoolServer(NetworkUtils.getServerHostName(server2.getHost()), port2); ClientCache cache = getClientCache(cf); Region region = 
cache.createClientRegionFactory( @@ -983,12 +983,12 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase { SerializableRunnable closeCache = new CacheSerializableRunnable( "Close Client") { public void run2() throws CacheException { - LogWriterSupport.getLogWriter().info("### Close Client. ###"); + LogWriterUtils.getLogWriter().info("### Close Client. ###"); try { closeCache(); disconnectFromDS(); } catch (Exception ex) { - LogWriterSupport.getLogWriter().info("### Failed to get close client. ###"); + LogWriterUtils.getLogWriter().info("### Failed to get close client. ###"); } } }; http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java index e8e881f..53c219f 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java @@ -35,7 +35,7 @@ import static org.junit.Assert.*; import com.gemstone.gemfire.cache.Region; import com.gemstone.gemfire.distributed.DistributedSystem; -import com.gemstone.gemfire.test.dunit.Threads; +import com.gemstone.gemfire.test.dunit.ThreadUtils; import com.gemstone.gemfire.test.junit.categories.IntegrationTest; import junit.framework.TestCase; @@ -82,7 +82,7 @@ public class PartitionedRegionSerializableObjectJUnitTest for (int i = 0; i < MAX_THREADS; i++) { threadArr[i].start(); - Threads.join(threadArr[i], 30 * 1000, null); + ThreadUtils.join(threadArr[i], 30 * 1000); } for (int i = 0; i < MAX_THREADS; i++) { http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java index 1c14c36..55fed01 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java @@ -67,11 +67,11 @@ import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId; import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil; import com.gemstone.gemfire.test.dunit.Assert; import com.gemstone.gemfire.test.dunit.AsyncInvocation; -import com.gemstone.gemfire.test.dunit.DistributedTestSupport; +import com.gemstone.gemfire.test.dunit.DistributedTestUtils; import com.gemstone.gemfire.test.dunit.Host; import com.gemstone.gemfire.test.dunit.IgnoredException; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; -import com.gemstone.gemfire.test.dunit.NetworkSupport; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; +import com.gemstone.gemfire.test.dunit.NetworkUtils; import com.gemstone.gemfire.test.dunit.VM; import com.gemstone.gemfire.test.dunit.Wait; import 
com.gemstone.gemfire.test.dunit.WaitCriterion; @@ -130,7 +130,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { } finally { - DistributedTestSupport.unregisterAllDataSerializersFromAllVms(); + DistributedTestUtils.unregisterAllDataSerializersFromAllVms(); } } @@ -196,7 +196,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -210,7 +210,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -224,7 +224,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -238,7 +238,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); return port; @@ -603,7 +603,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { public void test_SingleHopWithHAWithLocator() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -989,7 +989,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { for (Entry entry : clientMap.entrySet()) { List list = (List)entry.getValue(); if(list.size()<4){ - LogWriterSupport.getLogWriter().info("still waiting for 4 bucket owners in " + entry.getKey() + ": " + list); + LogWriterUtils.getLogWriter().info("still waiting for 4 bucket owners in " + entry.getKey() + ": " + list); finished = false; break; } @@ -1090,7 +1090,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { member2.invoke(PartitionedRegionSingleHopDUnitTest.class, "verifyMetadata", new Object[]{fclientMap}); member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "verifyMetadata", new Object[]{fclientMap}); } catch (Exception e) { - LogWriterSupport.getLogWriter().info("verification failed", e); + LogWriterUtils.getLogWriter().info("verification failed", e); return false; } return true; @@ -1414,7 +1414,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created 
Successfully :" + region.toString()); @@ -1428,7 +1428,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1441,7 +1441,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1454,7 +1454,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); replicatedRegion = cache.createRegion("rr", new AttributesFactory().create()); @@ -1484,7 +1484,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1498,7 +1498,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1512,7 +1512,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1526,7 +1526,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); @@ -1554,7 +1554,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1570,7 +1570,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + 
customerRegion.toString()); @@ -1586,7 +1586,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1602,7 +1602,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); @@ -1638,7 +1638,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1654,7 +1654,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1670,7 +1670,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1686,7 +1686,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { // attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); @@ -1724,7 +1724,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1738,7 +1738,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1752,7 +1752,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1766,7 +1766,7 @@ public class PartitionedRegionSingleHopDUnitTest 
extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); @@ -1802,7 +1802,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1815,7 +1815,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1828,7 +1828,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region ORDER created Successfully :" + orderRegion.toString()); @@ -1841,7 +1841,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attr.setConcurrencyChecksEnabled(true); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); replicatedRegion = cache.createRegion("rr", new AttributesFactory().create()); @@ -1957,7 +1957,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { RegionAttributes attrs = factory.create(); region = cache.createRegion(PR_NAME, attrs); assertNotNull(region); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Distributed Region " + PR_NAME + " created Successfully :" + region.toString()); @@ -1968,7 +1968,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attrs = factory.create(); customerRegion = cache.createRegion("CUSTOMER", attrs); assertNotNull(customerRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Distributed Region CUSTOMER created Successfully :" + customerRegion.toString()); @@ -1979,7 +1979,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attrs = factory.create(); orderRegion = cache.createRegion("ORDER", attrs); assertNotNull(orderRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Distributed Region ORDER created Successfully :" + orderRegion.toString()); @@ -1990,7 +1990,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase { attrs = factory.create(); shipmentRegion = cache.createRegion("SHIPMENT", attrs); assertNotNull(shipmentRegion); - LogWriterSupport.getLogWriter().info( + LogWriterUtils.getLogWriter().info( "Distributed Region SHIPMENT created Successfully :" + shipmentRegion.toString()); factory = new AttributesFactory(); 
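
The single-hop tests follow the same mechanical rename: NetworkSupport becomes NetworkUtils and DistributedTestSupport becomes DistributedTestUtils. The fragment below is an illustrative sketch, not part of the commit (the method name is hypothetical and the AvailablePort import path is assumed, since the hunks do not show it); it shows the "host[port]" locator-string construction these tests repeat with the renamed class.

    import com.gemstone.gemfire.internal.AvailablePort;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Hypothetical sketch: builds the locator string passed to startLocatorInVM.
    public class LocatorStringExample {

      static String locatorFor(VM member) {
        int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
        // Old form: NetworkSupport.getServerHostName(member.getHost())
        String host = NetworkUtils.getServerHostName(member.getHost());
        return host + "[" + port + "]";
      }
    }
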
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java index 9f21031..87738b8 100644 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java @@ -42,12 +42,12 @@ import com.gemstone.gemfire.internal.cache.execute.data.OrderId; import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId; import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil; import com.gemstone.gemfire.test.dunit.Assert; -import com.gemstone.gemfire.test.dunit.DistributedTestSupport; +import com.gemstone.gemfire.test.dunit.DistributedTestUtils; import com.gemstone.gemfire.test.dunit.Host; import com.gemstone.gemfire.test.dunit.IgnoredException; import com.gemstone.gemfire.test.dunit.Invoke; -import com.gemstone.gemfire.test.dunit.LogWriterSupport; -import com.gemstone.gemfire.test.dunit.NetworkSupport; +import com.gemstone.gemfire.test.dunit.LogWriterUtils; +import com.gemstone.gemfire.test.dunit.NetworkUtils; import com.gemstone.gemfire.test.dunit.SerializableRunnable; import com.gemstone.gemfire.test.dunit.VM; import com.gemstone.gemfire.test.dunit.Wait; @@ -157,7 +157,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes } finally { - DistributedTestSupport.unregisterAllDataSerializersFromAllVms(); + DistributedTestUtils.unregisterAllDataSerializersFromAllVms(); } } @@ -177,7 +177,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWith2ServerGroup() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -211,7 +211,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWith2ServerGroup2() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -245,7 +245,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWith2ServerGroup2WithoutSystemProperty() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; 
member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -274,7 +274,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupAccessor() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -307,7 +307,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupOneServerInTwoGroups() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -346,7 +346,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupWithOneDefaultServer() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -379,7 +379,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupClientServerGroupNull() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -412,7 +412,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupTwoClientServerGroup() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -459,7 +459,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes public void test_SingleHopWithServerGroupTwoClientServerGroup2() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final String host0 = NetworkSupport.getServerHostName(member3.getHost()); + final String host0 = NetworkUtils.getServerHostName(member3.getHost()); final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 }); @@ -503,7 +503,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes 
public void test_SingleHopWithServerGroupTwoClientOneWithOneWithoutServerGroup() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
- final String host0 = NetworkSupport.getServerHostName(member3.getHost());
+ final String host0 = NetworkUtils.getServerHostName(member3.getHost());
final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 });
@@ -542,7 +542,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
public void test_SingleHopWithServerGroup2ClientInOneVMServerGroup() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
- final String host0 = NetworkSupport.getServerHostName(member3.getHost());
+ final String host0 = NetworkUtils.getServerHostName(member3.getHost());
final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 });
@@ -586,7 +586,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
public void test_SingleHopWithServerGroupColocatedRegionsInDifferentGroup() { int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
- final String host0 = NetworkSupport.getServerHostName(member3.getHost());
+ final String host0 = NetworkUtils.getServerHostName(member3.getHost());
final String locator = host0 + "[" + port3 + "]"; member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "startLocatorInVM", new Object[] { port3 });
@@ -827,7 +827,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -840,7 +840,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -853,7 +853,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region ORDER created Successfully :" + orderRegion.toString());
@@ -866,7 +866,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); return port;
@@ -913,7 +913,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -926,7 +926,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -939,7 +939,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region ORDER created Successfully :" + orderRegion.toString());
@@ -952,7 +952,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); return port;
@@ -999,7 +999,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -1012,7 +1012,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -1025,7 +1025,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region ORDER created Successfully :" + orderRegion.toString());
@@ -1038,7 +1038,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString());
@@ -1051,7 +1051,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); region2 = cache.createRegion(PR_NAME2, attr.create()); assertNotNull(region2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region " + PR_NAME2 + " created Successfully :" + region2.toString());
@@ -1064,7 +1064,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); customerRegion2 = cache.createRegion(CUSTOMER2, attr.create()); assertNotNull(customerRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region CUSTOMER2 created Successfully :" + customerRegion2.toString());
@@ -1077,7 +1077,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); orderRegion2 = cache.createRegion(ORDER2, attr.create()); assertNotNull(orderRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region ORDER2 created Successfully :" + orderRegion2.toString());
@@ -1090,7 +1090,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); shipmentRegion2 = cache.createRegion(SHIPMENT2, attr.create()); assertNotNull(shipmentRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region SHIPMENT2 created Successfully :" + shipmentRegion2.toString());
@@ -1193,7 +1193,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
RegionAttributes attrs = factory.create(); region = cache.createRegion(PR_NAME, attrs); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -1202,7 +1202,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); customerRegion = cache.createRegion("CUSTOMER", attrs); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -1211,7 +1211,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); orderRegion = cache.createRegion("ORDER", attrs); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region ORDER created Successfully :" + orderRegion.toString());
@@ -1220,7 +1220,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); shipmentRegion = cache.createRegion("SHIPMENT", attrs); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region SHIPMENT created Successfully :" + shipmentRegion.toString()); }
@@ -1232,7 +1232,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
RegionAttributes attrs = factory.create(); region = cache.createRegion(PR_NAME, attrs); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -1241,7 +1241,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); customerRegion = cache.createRegion("CUSTOMER", attrs); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -1250,7 +1250,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); orderRegion = cache.createRegion("ORDER", attrs); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region ORDER created Successfully :" + orderRegion.toString());
@@ -1259,7 +1259,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); shipmentRegion = cache.createRegion("SHIPMENT", attrs); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region SHIPMENT created Successfully :" + shipmentRegion.toString());
@@ -1270,7 +1270,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); region2 = cache.createRegion(PR_NAME2, attrs); assertNotNull(region2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region " + PR_NAME2 + " created Successfully :" + region2.toString());
@@ -1279,7 +1279,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); customerRegion2 = cache.createRegion(CUSTOMER2, attrs); assertNotNull(customerRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region CUSTOMER2 created Successfully :" + customerRegion2.toString());
@@ -1288,7 +1288,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); orderRegion2 = cache.createRegion(ORDER2, attrs); assertNotNull(orderRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region ORDER2 created Successfully :" + orderRegion2.toString());
@@ -1297,7 +1297,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); shipmentRegion2 = cache.createRegion(SHIPMENT2, attrs); assertNotNull(shipmentRegion2);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region SHIPMENT2 created Successfully :" + shipmentRegion2.toString()); }
@@ -1309,7 +1309,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
RegionAttributes attrs = factory.create(); region = cache.createRegion(PR_NAME, attrs); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -1318,7 +1318,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); customerRegion = cache.createRegion("CUSTOMER", attrs); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -1327,7 +1327,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); orderRegion = cache.createRegion("ORDER", attrs); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region ORDER created Successfully :" + orderRegion.toString());
@@ -1336,7 +1336,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attrs = factory.create(); shipmentRegion = cache.createRegion("SHIPMENT", attrs); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Distributed Region SHIPMENT created Successfully :" + shipmentRegion.toString());
@@ -1368,7 +1368,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
region = cache.createRegion(PR_NAME, attr.create()); assertNotNull(region);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
@@ -1381,7 +1381,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); customerRegion = cache.createRegion("CUSTOMER", attr.create()); assertNotNull(customerRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
@@ -1393,7 +1393,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); orderRegion = cache.createRegion("ORDER", attr.create()); assertNotNull(orderRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region ORDER created Successfully :" + orderRegion.toString());
@@ -1405,7 +1405,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
attr.setPartitionAttributes(paf.create()); shipmentRegion = cache.createRegion("SHIPMENT", attr.create()); assertNotNull(shipmentRegion);
- LogWriterSupport.getLogWriter().info(
+ LogWriterUtils.getLogWriter().info(
"Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString()); return port;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
index 05f595c..cda653a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
@@ -34,10 +34,10 @@ import com.gemstone.gemfire.internal.logging.InternalLogWriter;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.LogWriterSupport;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.SerializableCallable;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.Threads;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.VM;
/**
@@ -120,7 +120,7 @@ public class PartitionedRegionSizeDUnitTest extends
public void run2() { Cache cache = getCache();
- final int oldLevel = setLogLevel(LogWriterSupport.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+ final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
for (int j = 0; j < MAX_REGIONS; j++) { Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + "DistAckSync" + j);
@@ -130,7 +130,7 @@ public class PartitionedRegionSizeDUnitTest extends
pr.put(key, value); } }
- setLogLevel(LogWriterSupport.getLogWriter(), oldLevel);
+ setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
} });
@@ -222,7 +222,7 @@ public class PartitionedRegionSizeDUnitTest extends
public void run2() { Cache cache = getCache();
- final int oldLevel = setLogLevel(LogWriterSupport.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+ final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
for (int j = 0; j < MAX_REGIONS; j++) { Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + "DistAckASync" + j);
@@ -232,11 +232,11 @@ public class PartitionedRegionSizeDUnitTest extends
pr.put(key, value); } }
- setLogLevel(LogWriterSupport.getLogWriter(), oldLevel);
+ setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
} });
- Threads.join(async0, 30 * 1000, LogWriterSupport.getLogWriter());
+ ThreadUtils.join(async0, 30 * 1000);
if (async0.exceptionOccurred()) { Assert.fail("Exception during async0", async0.getException());
@@ -326,7 +326,7 @@ public class PartitionedRegionSizeDUnitTest extends
public void run2() { Cache cache = getCache();
- final int oldLevel = setLogLevel(LogWriterSupport.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+ final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
for (int j = 0; j < MAX_REGIONS; j++) { Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + "DistAckSyncChangingVMCount" + j);
@@ -336,7 +336,7 @@ public class PartitionedRegionSizeDUnitTest extends
pr.put(key, value); } }
- setLogLevel(LogWriterSupport.getLogWriter(), oldLevel);
+ setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
} });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c05f6798/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
index a30ff47..0690a6d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
@@ -38,7 +38,7 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.LogWriterSupport;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.VM;
/**
@@ -92,7 +92,7 @@ public class PartitionedRegionTestUtilsDUnitTest extends
GsRandom rand = new GsRandom(123); // Assert that its empty for(int i=0; i<5; i++) {
- LogWriterSupport.getLogWriter().info("Invocation " + i + " of getSomeKeys");
+ LogWriterUtils.getLogWriter().info("Invocation " + i + " of getSomeKeys");
try { Set s = null; s = pr.getSomeKeys(rand);
@@ -112,17 +112,17 @@ public class PartitionedRegionTestUtilsDUnitTest extends
// Assert not empty and has value in an accepable range for(int i=0; i<5; i++) {
- LogWriterSupport.getLogWriter().info("Invocation " + i + " of getSomeKeys");
+ LogWriterUtils.getLogWriter().info("Invocation " + i + " of getSomeKeys");
try { Set s = null; s = pr.getSomeKeys(rand); assertNotNull(s); assertFalse(s.isEmpty()); Integer val;
- LogWriterSupport.getLogWriter().info("Invocation " + i + " got " + s.size() + " keys");
+ LogWriterUtils.getLogWriter().info("Invocation " + i + " got " + s.size() + " keys");
for (Iterator it = s.iterator(); it.hasNext(); ) { Object key = it.next();
- LogWriterSupport.getLogWriter().info("Key: " + key);
+ LogWriterUtils.getLogWriter().info("Key: " + key);
val = (Integer) pr.get(key); assertNotNull(val); assertTrue(val.intValue() >= 0);
