AMashenkov commented on a change in pull request #8229:
URL: https://github.com/apache/ignite/pull/8229#discussion_r485856161



##########
File path: modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java
##########
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ignite.internal.processors.cache.persistence.db;
+
+import java.util.Deque;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.StopNodeFailureHandler;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.metric.IoStatisticsHolder;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag;
+import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
+import org.apache.ignite.internal.processors.query.h2.H2RowCache;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex;
+import org.apache.ignite.internal.processors.query.h2.database.inlinecolumn.InlineIndexColumnFactory;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
+import org.apache.ignite.internal.processors.query.h2.opt.H2Row;
+import org.apache.ignite.internal.util.lang.GridTuple3;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.h2.table.IndexColumn;
+import org.jetbrains.annotations.Nullable;
+import org.junit.Test;
+
+import static java.util.Arrays.asList;
+
+/**
+ * Tests that destroying multiple caches in parallel does not lead to a deadlock.
+ */
+public class MultipleParallelCacheDeleteDeadlockTest extends GridCommonAbstractTest {
+    /** Latch that blocks test completion. */
+    private CountDownLatch testCompletionBlockingLatch = new CountDownLatch(1);
+
+    /** Latch that blocks checkpoint. */
+    private CountDownLatch checkpointBlockingLatch = new CountDownLatch(1);
+
+    /** We imitate a long index destroy in these tests, so this is the delay for destroying each page. */
+    private static final long TIME_FOR_EACH_INDEX_PAGE_TO_DESTROY = 300;
+
+    /** */
+    private static final String CACHE_1 = "cache_1";
+
+    /** */
+    private static final String CACHE_2 = "cache_2";
+
+    /** */
+    private static final String CACHE_GRP_1 = "cache_grp_1";
+
+    /** */
+    private static final String CACHE_GRP_2 = "cache_grp_2";
+
+    /** */
+    private H2TreeIndex.H2TreeFactory regularH2TreeFactory;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        return super.getConfiguration(igniteInstanceName)
+            .setFailureHandler(new StopNodeFailureHandler())
+            .setDataStorageConfiguration(
+                new DataStorageConfiguration().setDefaultDataRegionConfiguration(
+                    new DataRegionConfiguration()
+                        .setPersistenceEnabled(true)
+                        .setInitialSize(10 * 1024L * 1024L)
+                        .setMaxSize(50 * 1024L * 1024L)
+                )
+                .setCheckpointFrequency(Integer.MAX_VALUE)
+            )
+            .setCacheConfiguration(
+                new CacheConfiguration(CACHE_1)
+                    .setGroupName(CACHE_GRP_1)
+                    .setSqlSchema("PUBLIC"),
+                new CacheConfiguration(CACHE_2)
+                    .setGroupName(CACHE_GRP_2)
+                    .setSqlSchema("PUBLIC")
+            );
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cleanPersistenceDir();
+
+        regularH2TreeFactory = H2TreeIndex.h2TreeFactory;
+
+        H2TreeIndex.h2TreeFactory = H2TreeTest::new;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        H2TreeIndex.h2TreeFactory = regularH2TreeFactory;
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+
+        super.afterTest();
+    }
+
+    /** */
+    @Test
+    public void test() throws Exception {
+        IgniteEx ignite = startGrids(1);
+
+        ignite.cluster().active(true);
+
+        IgniteCache cache1 = ignite.getOrCreateCache(CACHE_1);
+        IgniteCache cache2 = ignite.getOrCreateCache(CACHE_2);
+
+        query(cache1, "create table t1(id integer primary key, f integer) with 
\"CACHE_GROUP=" + CACHE_GRP_1 + "\"");
+        query(cache1, "create index idx1 on t1(f)");
+
+        for (int i = 0; i < 500; i++)
+            query(cache1, "insert into t1 (id, f) values (?, ?)", i, i);
+
+        query(cache2, "create table t2(id integer primary key, f integer) with 
\"CACHE_GROUP=" + CACHE_GRP_2 + "\"");
+        query(cache2, "create index idx2 on t2(f)");
+
+        for (int i = 0; i < 500; i++)
+            query(cache2, "insert into t2 (id, f) values (?, ?)", i, i);
+
+        Thread checkpointer = new Thread(() -> {
+            try {
+                checkpointBlockingLatch.await();
+
+                forceCheckpoint();
+
+                testCompletionBlockingLatch.countDown();

Review comment:
   Let's move the count down into a finally block to allow the "destroyCaches" thread to proceed in case of failure.
   Otherwise, the "destroyCaches" thread can hang forever.
   Also, it would be polite to interrupt these threads on test failure and to call thread.join() to make sure the threads have stopped, so they do not hold resources (memory) and do not affect other tests.
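   A rough sketch of the suggested shape (illustration only; the thread field names, the join timeout, and the use of the base class `log`/`forceCheckpoint()` here are my assumptions, not part of the patch):

   ```java
   Thread checkpointer = new Thread(() -> {
       try {
           checkpointBlockingLatch.await();

           forceCheckpoint();
       }
       catch (Exception e) {
           // Log and fall through: the latch below must be released in any case.
           log.error("Checkpoint thread failed.", e);
       }
       finally {
           // Count down in finally so that the "destroyCaches" thread can proceed
           // even if the checkpoint (or the await above) fails.
           testCompletionBlockingLatch.countDown();
       }
   });
   ```

   And the cleanup could look roughly like this, assuming the test keeps the started threads in fields (`checkpointer` and `destroyCachesThread` are placeholder names):

   ```java
   // E.g. in afterTest(): stop the helper threads so they do not hold
   // resources or interfere with other tests.
   checkpointer.interrupt();
   destroyCachesThread.interrupt();

   checkpointer.join(10_000);
   destroyCachesThread.join(10_000);
   ```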




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

