tkalkirill commented on code in PR #3352:
URL: https://github.com/apache/ignite-3/pull/3352#discussion_r1515948065


##########
modules/page-memory/src/main/java/org/apache/ignite/internal/pagememory/tree/BplusTree.java:
##########
@@ -2854,7 +2854,9 @@ protected long destroyDownPages(
      *     execution to a {@link 
org.apache.ignite.internal.pagememory.util.GradualTaskExecutor}.
      * @throws IgniteInternalCheckedException If failed.
      */
-    public final GradualTask startGradualDestruction(@Nullable Consumer<L> c, 
boolean forceDestroy) throws IgniteInternalCheckedException {
+    public final GradualTask startGradualDestruction(
+            @Nullable Consumer<L> c, boolean forceDestroy, int maxWorkUnits

Review Comment:
   The new `maxWorkUnits` parameter is missing from the javadoc.



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/PersistentPageMemoryStorageEngine.java:
##########
@@ -171,22 +191,36 @@ public CompletableFuture<?> 
onCreate(ConfigurationNotificationEvent<PersistentPa
                 return nullCompletedFuture();
             }
         });
+
+        // TODO: remove this executor, see 
https://issues.apache.org/jira/browse/IGNITE-21683
+        destructionExecutor = new ThreadPoolExecutor(
+                0,
+                Runtime.getRuntime().availableProcessors(),
+                100,

Review Comment:
   Maybe `0` ?



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/VolatilePageMemoryStorageEngine.java:
##########
@@ -51,6 +54,13 @@ public class VolatilePageMemoryStorageEngine implements 
StorageEngine {
     /** Engine name. */
     public static final String ENGINE_NAME = "aimem";
 
+    /** 
+     * Maximum "work units" that are allowed to be used during {@link 
BplusTree} destruction.
+     *
+     * @see BplusTree#startGradualDestruction
+     */
+    public static final int MAX_DESTRUCTION_WORK_UNITS = 1_000_000;

Review Comment:
   I would also make it `1_000` so that we can clean indexes from different 
regions quickly and free memory more evenly.



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/VolatilePageMemoryStorageEngine.java:
##########
@@ -103,23 +113,31 @@ public CompletableFuture<?> 
onCreate(ConfigurationNotificationEvent<VolatilePage
             }
         });
 
-        ThreadPoolExecutor destructionThreadPool = new ThreadPoolExecutor(
+        // TODO: remove this executor, see 
https://issues.apache.org/jira/browse/IGNITE-21683
+        destructionExecutor = new ThreadPoolExecutor(
                 0,
                 Runtime.getRuntime().availableProcessors(),
                 100,

Review Comment:
   Maybe `0` ?



##########
modules/storage-api/src/main/java/org/apache/ignite/internal/storage/util/MvPartitionStorages.java:
##########
@@ -432,6 +434,26 @@ private String 
createStorageInProgressOfRebalanceErrorMessage(int partitionId) {
         return null;
     }
 
+    /**
+     * Returns a list of all existing storages.
+     *
+     * <p>Note: this method may produce races when a rebalance is happening 
concurrently as the underlying storage array may change.
+     * The callers of this method should resolve these races themselves.
+     */
+    public List<T> getAll() {
+        var list = new ArrayList<T>(storageByPartitionId.length());

Review Comment:
   Optional: this could be written more elegantly using streams.



##########
modules/page-memory/src/test/java/org/apache/ignite/internal/pagememory/util/GradualTaskExecutorTest.java:
##########
@@ -57,6 +58,8 @@ class GradualTaskExecutorTest extends BaseIgniteAbstractTest {
     @AfterEach
     void stopExecutor() {
         executor.close();
+
+        IgniteUtils.shutdownAndAwaitTermination(executorService, 10, 
TimeUnit.SECONDS);

Review Comment:
   Maybe something like:
   ```
           IgniteUtils.closeAll(
                   executor::close,
                   () -> 
IgniteUtils.shutdownAndAwaitTermination(executorService, 10, TimeUnit.SECONDS)
           );
   ```
   



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/mv/ConsistentGradualTaskExecutor.java:
##########
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.storage.pagememory.mv;
+
+import java.util.concurrent.ExecutorService;
+import org.apache.ignite.internal.pagememory.util.GradualTask;
+import org.apache.ignite.internal.pagememory.util.GradualTaskExecutor;
+import org.apache.ignite.internal.storage.MvPartitionStorage;
+import org.apache.ignite.internal.storage.StorageException;
+
+/**
+ * A {@link GradualTaskExecutor} that wraps every task step into {@link 
MvPartitionStorage#runConsistently}.
+ */
+class ConsistentGradualTaskExecutor extends GradualTaskExecutor {
+    private final MvPartitionStorage mvPartitionStorage;
+
+    ConsistentGradualTaskExecutor(MvPartitionStorage partitionStorage, 
ExecutorService executor) {
+        super(executor);
+
+        this.mvPartitionStorage = partitionStorage;
+    }
+
+    @Override
+    protected void runStep(GradualTask task) {
+        mvPartitionStorage.runConsistently(locker -> {
+            try {
+                task.runStep();
+
+                return null;
+            } catch (Exception e) {
+                throw new StorageException(e);

Review Comment:
   Can it wrap the exception only if it is not already a `StorageException`?



##########
modules/storage-api/src/main/java/org/apache/ignite/internal/storage/util/StorageUtils.java:
##########
@@ -129,6 +131,8 @@ public static void 
throwExceptionDependingOnStorageState(StorageState state, Str
                 throw new 
StorageRebalanceException(createStorageInProcessOfRebalanceErrorMessage(storageInfo));
             case CLEANUP:
                 throw new 
StorageException(createStorageInProcessOfCleanupErrorMessage(storageInfo));
+            case DESTROYING:
+                throw new 
StorageException(createStorageDestroyedErrorMessage(storageInfo));

Review Comment:
   I propose adding a special exception type so that we can handle the situation 
where the index has been destroyed and simply skip it.



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/mv/AbstractPageMemoryMvPartitionStorage.java:
##########
@@ -218,6 +229,8 @@ public PageMemoryHashIndexStorage 
getOrCreateHashIndex(StorageHashIndexDescripto
      * @param indexDescriptor Index descriptor.
      */
     public PageMemorySortedIndexStorage 
getOrCreateSortedIndex(StorageSortedIndexDescriptor indexDescriptor) {
+        assert !hashIndexes.containsKey(indexDescriptor.id());

Review Comment:
   Will there be races between the two maps?



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/mv/AbstractPageMemoryMvPartitionStorage.java:
##########
@@ -206,6 +215,8 @@ public int partitionId() {
      * @param indexDescriptor Index descriptor.
      */
     public PageMemoryHashIndexStorage 
getOrCreateHashIndex(StorageHashIndexDescriptor indexDescriptor) {
+        assert !sortedIndexes.containsKey(indexDescriptor.id());

Review Comment:
   Will there be races between the two maps?



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/mv/AbstractPageMemoryMvPartitionStorage.java:
##########
@@ -969,4 +979,48 @@ IndexMeta createIndexMetaForNewIndex(int indexId) {
             return sortedIndexes.get(indexId);
         });
     }
+
+    /**
+     * Destroys an index storage identified by the given index ID.
+     *
+     * @param indexId Index ID which storage will be destroyed.
+     * @return Future that will be completed as soon as the storage has been 
destroyed.
+     */
+    // TODO: Index users should be able to handle the case, when an index is 
being concurrently destroyed, see
+    //  https://issues.apache.org/jira/browse/IGNITE-20126
+    public CompletableFuture<Void> destroyIndex(int indexId) {
+        return busy(() -> {
+            CompletableFuture<Void> result = nullCompletedFuture();
+
+            PageMemoryHashIndexStorage hashIndexStorage = 
hashIndexes.remove(indexId);
+
+            if (hashIndexStorage != null) {
+                assert !sortedIndexes.containsKey(indexId);

Review Comment:
   ```suggestion
                   assert !sortedIndexes.containsKey(indexId) : indexId;
   ```



##########
modules/storage-page-memory/src/main/java/org/apache/ignite/internal/storage/pagememory/mv/VolatilePageMemoryMvPartitionStorage.java:
##########
@@ -187,79 +197,64 @@ public void lastAppliedOnRebalance(long lastAppliedIndex, 
long lastAppliedTerm)
         this.lastAppliedTerm = lastAppliedTerm;
     }
 
-    @Override
-    protected List<AutoCloseable> getResourcesToClose(boolean goingToDestroy) {
-        List<AutoCloseable> resourcesToClose = 
super.getResourcesToClose(goingToDestroy);
-
-        if (!goingToDestroy) {
-            // If we are going to destroy after closure, we should retain 
indices because the destruction logic
-            // will need to destroy them as well. It will clean the maps after 
it starts the destruction.
-
-            resourcesToClose.add(hashIndexes::clear);
-            resourcesToClose.add(sortedIndexes::clear);
-        }
-
-        return resourcesToClose;
-    }
-
     /**
-     * Cleans data backing this partition. Indices are destroyed, but index 
desscriptors are
-     * not removed from this partition so that they can be refilled with data 
later.
+     * Transitions this storage to the {@link StorageState#DESTROYING} state.
      */
-    public void cleanStructuresData() {
-        destroyStructures(false);
-    }
+    public void transitionToDestroyingState() {
+        while (true) {
+            StorageState curState = state.get();
+
+            if (curState == StorageState.CLOSED || curState == 
StorageState.DESTROYING) {
+                throwExceptionDependingOnStorageState(curState, 
createStorageInfo());
+            } else if (state.compareAndSet(curState, StorageState.DESTROYING)) 
{
+                break;
+            }
+        }
 
-    /**
-     * Destroys internal structures (including indices) backing this partition.
-     */
-    public void destroyStructures() {
-        destroyStructures(true);
+        
hashIndexes.values().forEach(PageMemoryHashIndexStorage::transitionToDestroyingState);
+        
sortedIndexes.values().forEach(PageMemorySortedIndexStorage::transitionToDestroyingState);
     }
 
     /**
      * Destroys internal structures (including indices) backing this partition.
-     *
-     * @param removeIndexDescriptors Whether indices should be completely 
removed, not just their contents destroyed.
      */
-    private void destroyStructures(boolean removeIndexDescriptors) {
-        startMvDataDestruction();
-        startIndexMetaTreeDestruction();
-        startGarbageCollectionTreeDestruction();
+    public CompletableFuture<Void> destroyStructures() {
+        Stream<CompletableFuture<?>> destroyFutures = Stream.of(
+                startMvDataDestruction(),
+                startIndexMetaTreeDestruction(),
+                startGarbageCollectionTreeDestruction()
+        );
+
+        Stream<CompletableFuture<Void>> hashIndexDestroyFutures = 
hashIndexes.values()
+                .stream()
+                .map(indexStorage -> 
indexStorage.startDestructionOn(destructionExecutor));
+
+        Stream<CompletableFuture<Void>> sortedIndexDestroyFutures = 
sortedIndexes.values()
+                .stream()
+                .map(indexStorage -> 
indexStorage.startDestructionOn(destructionExecutor));
+
+        Stream<CompletableFuture<Void>> indexDestroyFutures = 
Stream.concat(hashIndexDestroyFutures, sortedIndexDestroyFutures);
 
-        hashIndexes.values().forEach(indexStorage -> 
indexStorage.startDestructionOn(destructionExecutor));
-        sortedIndexes.values().forEach(indexStorage -> 
indexStorage.startDestructionOn(destructionExecutor));
+        CompletableFuture<?>[] allDestroyFutures = 
Stream.concat(destroyFutures, 
indexDestroyFutures).toArray(CompletableFuture[]::new);
 
         lastAppliedIndex = 0;
         lastAppliedTerm = 0;
         groupConfig = null;
 
-        if (removeIndexDescriptors) {
-            hashIndexes.clear();
-            sortedIndexes.clear();
-        }
+        return CompletableFuture.allOf(allDestroyFutures);

Review Comment:
   Maybe use static import?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to