joerghoh commented on code in PR #587:
URL: https://github.com/apache/jackrabbit-oak/pull/587#discussion_r908295427


##########
oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/DocumentStoreIndexerBase.java:
##########
@@ -265,6 +271,53 @@ public void reindex() throws CommitFailedException, 
IOException {
         indexerSupport.postIndexWork(copyOnWriteStore);
     }
 
+    private void indexParallel(List<FlatFileStore> storeList, CompositeIndexer 
indexer, IndexingProgressReporter progressReporter) {
+        ExecutorService service = 
Executors.newFixedThreadPool(INDEX_THREAD_POOL_SIZE);
+        List<Future> futureList = new ArrayList<>();
+
+        for (FlatFileStore item : storeList) {
+            Future future = service.submit(new Callable<Boolean>() {
+                @Override
+                public Boolean call() throws IOException, 
CommitFailedException {
+                    for (NodeStateEntry entry : item) {
+                        reportDocumentRead(entry.getPath(), progressReporter);
+                        indexer.index(entry);
+                    }
+                    return true;
+                }
+            });
+            futureList.add(future);
+        }
+
+        try {
+            for (Future future : futureList) {
+                future.get();
+            }
+            log.info("All {} indexing jobs are done", storeList.size());
+            service.shutdown();

Review Comment:
   ```service.shutdown()``` is not executed in case of exceptions; it should be moved into a ```finally``` block so the pool is always shut down.



##########
oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/progress/IndexingProgressReporter.java:
##########
@@ -229,10 +231,12 @@ public IndexUpdateState(String indexPath, boolean 
reindex, long estimatedCount)
         }
 
         public void indexUpdate() throws CommitFailedException {
-            updateCount++;
-            if (updateCount % 10000 == 0) {
-                log.info("{} => Indexed {} nodes in {} ...", indexPath, 
updateCount, watch);
-                watch.reset().start();
+            updateCount.incrementAndGet();
+            if (updateCount.get() % 10000 == 0) {
+                synchronized (this) {
+                    log.info("{} => Indexed {} nodes in {} ...", indexPath, 
updateCount, watch);

Review Comment:
   The local copy of the counter should be used here as well, instead of reading ```updateCount``` again.



##########
oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileSplitter.java:
##########
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.index.indexer.document.flatfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.Compression;
+import org.apache.jackrabbit.oak.index.indexer.document.NodeStateEntry;
+import org.apache.jackrabbit.oak.plugins.index.search.Aggregate;
+import org.apache.jackrabbit.oak.plugins.index.search.IndexDefinition;
+import org.apache.jackrabbit.oak.query.ast.NodeTypeInfo;
+import org.apache.jackrabbit.oak.query.ast.NodeTypeInfoProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.Stack;
+import java.util.stream.Collectors;
+
+import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
+import static org.apache.jackrabbit.JcrConstants.NT_BASE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.DEFAULT_NUMBER_OF_SPLIT_STORE_SIZE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.OAK_INDEXER_USE_LZ4;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.OAK_INDEXER_USE_ZIP;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.PROP_SPLIT_STORE_SIZE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.createReader;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.createWriter;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.getSortedStoreFileName;
+
+/**
+ * This class is being used when {@link 
FlatFileNodeStoreBuilder.OAK_INDEXER_PARALLEL_INDEX} is set to true.
+ * It will split a flat file safely by checking the index definitions. An 
entry is considered safe to split if only
+ * none of the parent directories contains nodes in indexRule and aggregate 
fields of the provided index definitions.
+ */
+public class FlatFileSplitter {
+    private static final Logger log = 
LoggerFactory.getLogger(FlatFileSplitter.class);
+
+    private static final String SPLIT_DIR_NAME = "split";
+    private static final long MINIMUM_SPLIT_THRESHOLD = 10 * FileUtils.ONE_MB;
+
+    private final File workDir;
+    private final NodeTypeInfoProvider infoProvider;
+    private final File flatFile;
+    private final NodeStateEntryReader entryReader;
+    private final Compression.Algorithm algorithm;
+    private Set<IndexDefinition> indexDefinitions;
+    private Set<String> splitNodeTypeNames;
+    private long minimumSplitThreshold = MINIMUM_SPLIT_THRESHOLD;
+    private int splitSize = Integer.getInteger(PROP_SPLIT_STORE_SIZE, 
DEFAULT_NUMBER_OF_SPLIT_STORE_SIZE);
+    private boolean useCompression = 
Boolean.parseBoolean(System.getProperty(OAK_INDEXER_USE_ZIP, "true"));
+    private boolean useLZ4 = 
Boolean.parseBoolean(System.getProperty(OAK_INDEXER_USE_LZ4, "false"));
+
+    public FlatFileSplitter(File flatFile, File workdir, NodeTypeInfoProvider 
infoProvider, NodeStateEntryReader entryReader,
+            Set<IndexDefinition> indexDefinitions) {
+        this.flatFile = flatFile;
+        this.indexDefinitions = indexDefinitions;
+        this.workDir = new File(workdir, SPLIT_DIR_NAME);
+
+        this.infoProvider = infoProvider;
+        this.entryReader = entryReader;
+
+        Compression.Algorithm algorithm = Compression.Algorithm.GZIP;

Review Comment:
   For a more natural reading I would set the default to ```NONE```, and then check the provided parameters to decide whether to switch it to LZ4 or GZIP.



##########
oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/progress/IndexingProgressReporter.java:
##########
@@ -229,10 +231,12 @@ public IndexUpdateState(String indexPath, boolean 
reindex, long estimatedCount)
         }
 
         public void indexUpdate() throws CommitFailedException {
-            updateCount++;
-            if (updateCount % 10000 == 0) {
-                log.info("{} => Indexed {} nodes in {} ...", indexPath, 
updateCount, watch);
-                watch.reset().start();
+            updateCount.incrementAndGet();
+            if (updateCount.get() % 10000 == 0) {

Review Comment:
   There is a race condition between this line and the previous one (another thread can increment the counter between the ```incrementAndGet()``` and the ```get()```). It should look
like this:
   
   ```
   final long count = updateCount.incrementAndGet();
   if (count % 10000 == 0) {
   ```



##########
oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileSplitter.java:
##########
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.index.indexer.document.flatfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.Compression;
+import org.apache.jackrabbit.oak.index.indexer.document.NodeStateEntry;
+import org.apache.jackrabbit.oak.plugins.index.search.Aggregate;
+import org.apache.jackrabbit.oak.plugins.index.search.IndexDefinition;
+import org.apache.jackrabbit.oak.query.ast.NodeTypeInfo;
+import org.apache.jackrabbit.oak.query.ast.NodeTypeInfoProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.Stack;
+import java.util.stream.Collectors;
+
+import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
+import static org.apache.jackrabbit.JcrConstants.NT_BASE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.DEFAULT_NUMBER_OF_SPLIT_STORE_SIZE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.OAK_INDEXER_USE_LZ4;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.OAK_INDEXER_USE_ZIP;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileNodeStoreBuilder.PROP_SPLIT_STORE_SIZE;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.createReader;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.createWriter;
+import static 
org.apache.jackrabbit.oak.index.indexer.document.flatfile.FlatFileStoreUtils.getSortedStoreFileName;
+
+/**
+ * This class is being used when {@link 
FlatFileNodeStoreBuilder.OAK_INDEXER_PARALLEL_INDEX} is set to true.
+ * It will split a flat file safely by checking the index definitions. An 
entry is considered safe to split if only
+ * none of the parent directories contains nodes in indexRule and aggregate 
fields of the provided index definitions.
+ */
+public class FlatFileSplitter {
+    private static final Logger log = 
LoggerFactory.getLogger(FlatFileSplitter.class);
+
+    private static final String SPLIT_DIR_NAME = "split";
+    private static final long MINIMUM_SPLIT_THRESHOLD = 10 * FileUtils.ONE_MB;
+
+    private final File workDir;
+    private final NodeTypeInfoProvider infoProvider;
+    private final File flatFile;
+    private final NodeStateEntryReader entryReader;
+    private final Compression.Algorithm algorithm;
+    private Set<IndexDefinition> indexDefinitions;
+    private Set<String> splitNodeTypeNames;
+    private long minimumSplitThreshold = MINIMUM_SPLIT_THRESHOLD;
+    private int splitSize = Integer.getInteger(PROP_SPLIT_STORE_SIZE, 
DEFAULT_NUMBER_OF_SPLIT_STORE_SIZE);
+    private boolean useCompression = 
Boolean.parseBoolean(System.getProperty(OAK_INDEXER_USE_ZIP, "true"));
+    private boolean useLZ4 = 
Boolean.parseBoolean(System.getProperty(OAK_INDEXER_USE_LZ4, "false"));
+
+    public FlatFileSplitter(File flatFile, File workdir, NodeTypeInfoProvider 
infoProvider, NodeStateEntryReader entryReader,
+            Set<IndexDefinition> indexDefinitions) {
+        this.flatFile = flatFile;
+        this.indexDefinitions = indexDefinitions;
+        this.workDir = new File(workdir, SPLIT_DIR_NAME);
+
+        this.infoProvider = infoProvider;
+        this.entryReader = entryReader;
+
+        Compression.Algorithm algorithm = Compression.Algorithm.GZIP;
+        if (!useCompression) {
+            algorithm = Compression.Algorithm.NONE;
+        } else if (useLZ4) {
+            algorithm = Compression.Algorithm.LZ4;
+        }
+        this.algorithm = algorithm;
+    }
+
+    private List<File> returnOriginalFlatFile() {
+        return Collections.singletonList(flatFile);
+    }
+
+    public List<File> split() throws IOException {
+        return split(true);
+    }
+
+    public List<File> split(boolean deleteOriginal) throws IOException {
+        List<File> splitFlatFiles = new ArrayList<>();
+        try {
+            FileUtils.forceMkdir(workDir);
+        } catch (IOException e) {
+            log.error("failed to create split directory {}", 
workDir.getAbsolutePath());
+            return returnOriginalFlatFile();
+        }
+
+        long fileSizeInBytes = flatFile.length();
+        long splitThreshold = Math.round((double) (fileSizeInBytes / 
splitSize));
+        log.info("original flat file size: ~{}",  
FileUtils.byteCountToDisplaySize(fileSizeInBytes));
+        log.info("split threshold is ~{} bytes, estimate split size >={} 
files",  FileUtils.byteCountToDisplaySize(splitThreshold), splitSize);
+
+        // return original if file too small or split size equals 1

Review Comment:
   Move this check up, before the two info statements; it does not make sense to log them
if we don't need to split at all.



##########
oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/writer/IndexWriterUtils.java:
##########
@@ -33,12 +35,13 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
 import org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapper;
+import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.SerialMergeScheduler;
 
-import static 
org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.VERSION;
-
 public class IndexWriterUtils {
+    private static final int INDEX_WRITER_MAX_MERGE = 8;
+    private static final int INDEX_WRITER_MAX_THREAD = 8;

Review Comment:
   does it make sense to make this configurable?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to