ygerzhedovich commented on a change in pull request #9423:
URL: https://github.com/apache/ignite/pull/9423#discussion_r723933658



##########
File path: modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/stat/StatisticsProcessor.java
##########
@@ -0,0 +1,345 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.stat;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.Function;
+
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.NodeStoppingException;
+import org.apache.ignite.internal.processors.query.stat.config.StatisticsColumnConfiguration;
+import org.apache.ignite.internal.processors.query.stat.config.StatisticsObjectConfiguration;
+import org.apache.ignite.internal.processors.query.stat.task.GatherPartitionStatistics;
+import org.apache.ignite.thread.IgniteThreadPoolExecutor;
+
+/**
+ * Processes all tasks related to the statistics repository - mostly statistics collection,
+ * invalidation (due to configuration, topology or obsolescence issues) and loading.
+ * Input tasks should be scheduled through the management pool, while the gathering pool is used
+ * to process heavy operations in parallel.
+ *
+ * Manages the gathering pool and its jobs. To guarantee graceful shutdown:
+ * 1) Any job can be added into gatheringInProgress only in the active state (checked after adding).
+ * 2) The state can be deactivated only after cancelling all jobs and acquiring the busyLock block.
+ * 3) Each job should do its work under busyLock, periodically checking its cancellation status.
+ */
+public class StatisticsProcessor {
+    /** Logger. */
+    private final IgniteLogger log;
+
+    /** Ignite statistics repository. */
+    private final IgniteStatisticsRepository statRepo;
+
+    /** Ignite Thread pool executor to do statistics collection tasks. */
+    private final BusyExecutor gatheringBusyExecutor;
+
+    /** (statistics key -> gathering context) */
+    private final ConcurrentMap<StatisticsKey, LocalStatisticsGatheringContext> gatheringInProgress =
+        new ConcurrentHashMap<>();
+
+    /**
+     * Constructor.
+     *
+     * @param repo IgniteStatisticsRepository.
+     * @param gatherPool Thread pool to gather statistics in.
+     * @param logSupplier Log supplier function.
+     */
+    public StatisticsProcessor(
+        IgniteStatisticsRepository repo,
+        IgniteThreadPoolExecutor gatherPool,
+        Function<Class<?>, IgniteLogger> logSupplier
+    ) {
+        this.statRepo = repo;
+        this.gatheringBusyExecutor = new BusyExecutor("gathering", gatherPool, logSupplier);
+        this.log = logSupplier.apply(StatisticsProcessor.class);
+    }
+
+    /**
+     * Update statistics for the given key to the actual state.
+     * If byObsolescence and tbl is not {@code null} - does not clear any other partitions.
+     *
+     * 1) Replace the previous gathering context, if it exists and replacement is needed (replace a
+     * byObsolescence gathering with a new one, or replace a gathering with an older configuration
+     * or topology version with a new one).
+     * 2) If byObsolescence and no table is available - clean obsolescence and partition statistics
+     * for the given key.
+     * 3) Submit tasks for each specified partition.
+     * 4) After the last task finishes gathering - start aggregation.
+     * 5) Read all partitions & obsolescence info from the repo and:
+     * if byObsolescence = {@code true} - remove unnecessary ones and aggregate by the specified list;
+     * if byObsolescence = {@code false} - aggregate all present in the store (because it should
+     * contain only actual ones).
+     * 6) Save the aggregated local statistics.
+     *
+     * @param ctx Statistics gathering context.
+     */
+    public void updateLocalStatistics(LocalStatisticsGatheringContext ctx) {
+        if (log.isDebugEnabled()) {
+            log.debug(String.format(
+                "Start statistics processing: forceRecollect=%b, cfg=%s, partToProcess = %s, topVer=%s",
+                ctx.forceRecollect(), ctx.configuration(), ctx.allParts(), ctx.topologyVersion()));
+        }
+
+        if (registerNewTask(ctx)) {
+            try {
+                if (ctx.forceRecollect())
+                    // Save all obsolescence info even if there are no partitions to recollect.
+                    statRepo.saveObsolescenceInfo(ctx.configuration().key());
+
+                if (ctx.table() == null || ctx.configuration() == null || ctx.configuration().columns().isEmpty()) {
+                    statRepo.clearLocalPartitionsStatistics(ctx.configuration().key(), null);
+                    ctx.future().complete(null);
+
+                    return;
+                }
+
+                if (ctx.remainingParts().isEmpty())
+                    ctx.future().complete(null);
+                else
+                    submitTasks(ctx);
+            }
+            catch (Throwable t) {
+                // submitTasks can't fire an error, so there is no need to cancel and wait for tasks to finish here.
+                ctx.future().completeExceptionally(t);
+            }
+        }
+        else {
+            if (log.isDebugEnabled())
+                log.debug("Gathering by key " + ctx.configuration().key() + " was skipped due to the previous one.");
+        }
+    }
+
+    /**
+     * Try to register a new task. The registered task will remove itself from gatheringInProgress after completion.
+     * If there is some other task for the given key - the operation will be scheduled right after it, if necessary
+     * (i.e. the current task has a newer configuration or topology).
+     *
+     * @param ctx Task to register.
+     * @return {@code true} if the task was actually pushed as the gathering-in-progress task, {@code false} otherwise.
+     */
+    private boolean registerNewTask(LocalStatisticsGatheringContext ctx) {
+        boolean[] res = new boolean[1];
+
+        gatheringInProgress.compute(ctx.configuration().key(), (k, v) -> {
+            if (v == null) {
+                // No old context - start new
+                res[0] = true;
+
+                ctx.future().whenComplete((r, t) -> {
+                    if (t != null) {
+                        if (t instanceof CancellationException || t instanceof NodeStoppingException) {
+                            if (log.isDebugEnabled())
+                                log.debug("Got " + t.getClass() + " exception during statistics collection by key "
+                                    + ctx.configuration().key() + ".");
+                        }
+                        else
+                            log.warning("Unexpected error during statistics collection by key " +
+                                ctx.configuration().key() + ". " + t.getMessage(), t);
+                    }
+
+                    // Either way - try to remove this context from the gathering map.
+                    gatheringInProgress.remove(ctx.configuration().key(), ctx);
+                });
+
+                return ctx;
+            }
+
+            // If there is an existing context for the key - check whether we can cancel it.
+            if (v.topologyVersion() == null ||
+                (ctx.topologyVersion() != null && v.topologyVersion().compareTo(ctx.topologyVersion()) < 0) ||
+                v.configuration().compareTo(ctx.configuration()) < 0) {
+                // Old context is for an older topology or config - cancel it and start a new one.
+                v.cancel();
+
+                v.future().whenComplete((r, t) -> {
+                    // Will be executed before the original completion handler, so we have to remove
+                    // the previous context here to be able to add the new one.
+                    gatheringInProgress.remove(ctx.configuration().key(), v);
+
+                    boolean rescheduled = gatheringBusyExecutor.busyRun(() -> updateLocalStatistics(ctx));
+
+                    if (!rescheduled && log.isDebugEnabled()) {
+                        log.debug("Unable to reschedule statistics task by key " + ctx.configuration().key()
+                            + " due to inactive state.");

Review comment:
       That's not quite so: busyRun can also return false when the task throws an exception, not only when the executor is in an inactive state.
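
       For illustration, a minimal sketch of the contract I mean (a hypothetical simplification, not the actual BusyExecutor code):

           // Hypothetical, simplified model of busyRun: the "inactive" case and the
           // "task threw" case collapse into the same false result for the caller.
           class BusyExecutorSketch {
               private volatile boolean active = true;

               /** Runs the task if active; returns false when inactive OR when the task fails. */
               public boolean busyRun(Runnable task) {
                   if (!active)
                       return false;

                   try {
                       task.run();

                       return true;
                   }
                   catch (Exception e) {
                       // The failure is swallowed here, so the caller sees the
                       // same false as in the inactive case.
                       return false;
                   }
               }
           }

       So the debug message above shouldn't claim "due to inactive state" - the task may simply have failed.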




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

