Author: chetanm
Date: Fri Jan 15 06:21:17 2016
New Revision: 1724733

URL: http://svn.apache.org/viewvc?rev=1724733&view=rev
Log:
OAK-3819 - Collect and expose statistics related to DocumentStore operations

- Adding DocumentStoreStats for stats collection
- DocumentStoreStatsMBean - For exposing the JMX stats

Integration with RDB is still pending

Added:
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
   (with props)
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
   (with props)
    
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java
   (with props)
Modified:
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
    
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
    
jackrabbit/oak/trunk/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/DocumentNodeStoreConfigTest.groovy

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java?rev=1724733&r1=1724732&r2=1724733&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
 Fri Jan 15 06:21:17 2016
@@ -520,6 +520,7 @@ public class DocumentMK {
         private StatisticsProvider statisticsProvider = 
StatisticsProvider.NOOP;
         private BlobStoreStats blobStoreStats;
         private CacheStats blobStoreCacheStats;
+        private DocumentStoreStatsCollector documentStoreStatsCollector;
 
         public Builder() {
         }
@@ -874,6 +875,18 @@ public class DocumentMK {
             return this;
         }
 
        /**
         * Returns the collector used to record DocumentStore operation
         * statistics, lazily creating a {@link DocumentStoreStats} backed by
         * the configured {@code statisticsProvider} (NOOP by default) on
         * first access.
         *
         * @return the stats collector, never {@code null}
         */
        public DocumentStoreStatsCollector getDocumentStoreStatsCollector() {
            if (documentStoreStatsCollector == null) {
                documentStoreStatsCollector = new DocumentStoreStats(statisticsProvider);
            }
            return documentStoreStatsCollector;
        }
+
        /**
         * Sets a custom stats collector, overriding the lazily created
         * default from {@link #getDocumentStoreStatsCollector()}.
         *
         * @param documentStoreStatsCollector the collector to use
         * @return this builder, for call chaining
         */
        public Builder setDocumentStoreStatsCollector(DocumentStoreStatsCollector documentStoreStatsCollector) {
            this.documentStoreStatsCollector = documentStoreStatsCollector;
            return this;
        }
+
         @CheckForNull
         public BlobStoreStats getBlobStoreStats() {
             return blobStoreStats;

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java?rev=1724733&r1=1724732&r2=1724733&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
 Fri Jan 15 06:21:17 2016
@@ -641,6 +641,16 @@ public class DocumentNodeStoreService {
             );
         }
 
        // Expose the stats collector over JMX when it also implements the
        // MBean interface (true for the default DocumentStoreStats); a custom
        // collector that does not implement it is simply not registered.
        if (mkBuilder.getDocumentStoreStatsCollector() instanceof DocumentStoreStatsMBean) {
            registrations.add(
                    registerMBean(whiteboard,
                            DocumentStoreStatsMBean.class,
                            (DocumentStoreStatsMBean) mkBuilder.getDocumentStoreStatsCollector(),
                            DocumentStoreStatsMBean.TYPE,
                            "DocumentStore Statistics")
            );
        }
+
         final long versionGcMaxAgeInSecs = toLong(prop(PROP_VER_GC_MAX_AGE), 
DEFAULT_VER_GC_MAX_AGE);
         final long blobGcMaxAgeInSecs = toLong(prop(PROP_BLOB_GC_MAX_AGE), 
DEFAULT_BLOB_GC_MAX_AGE);
 

Added: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java?rev=1724733&view=auto
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
 (added)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
 Fri Jan 15 06:21:17 2016
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import javax.management.openmbean.CompositeData;
+
+import org.apache.jackrabbit.api.stats.TimeSeries;
+import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.apache.jackrabbit.oak.stats.MeterStats;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
+import org.apache.jackrabbit.oak.stats.StatsOptions;
+import org.apache.jackrabbit.oak.stats.TimerStats;
+import org.apache.jackrabbit.stats.TimeSeriesStatsUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Document Store statistics helper class.
+ */
+public class DocumentStoreStats implements DocumentStoreStatsCollector, 
DocumentStoreStatsMBean {
+    private final Logger perfLog = 
LoggerFactory.getLogger(DocumentStoreStats.class.getName() + ".perf");
+
+    public static final int PERF_LOG_THRESHOLD = 1;
+
+    static final String NODES_FIND_CACHED = "DOCUMENT_NODES_FIND_CACHED";
+    static final String NODES_FIND_SPLIT = "DOCUMENT_NODES_FIND_SPLIT";
+    static final String NODES_FIND_SLAVE = "DOCUMENT_NODES_FIND_SLAVE";
+    static final String NODES_FIND_PRIMARY = "DOCUMENT_NODES_FIND_PRIMARY";
+    static final String NODES_FIND_MISSING_TIMER = 
"DOCUMENT_NODES_FIND_MISSING";
+    static final String NODES_FIND_TIMER = "DOCUMENT_NODES_FIND";
+
+    static final String NODES_QUERY_FIND_READ_COUNT = 
"DOCUMENT_NODES_QUERY_FIND";
+    static final String NODES_QUERY_FILTER = "DOCUMENT_NODES_QUERY_FILTER";
+    static final String NODES_QUERY_TIMER = "DOCUMENT_NODES_QUERY";
+    static final String NODES_QUERY_SLAVE = "DOCUMENT_NODES_QUERY_SLAVE";
+    static final String NODES_QUERY_PRIMARY = "DOCUMENT_NODES_QUERY_PRIMARY";
+
+    static final String NODES_QUERY_LOCK = "DOCUMENT_NODES_QUERY_LOCK";
+    static final String NODES_QUERY_LOCK_TIMER = 
"DOCUMENT_NODES_QUERY_LOCK_TIMER";
+
+    static final String NODES_CREATE = "DOCUMENT_NODES_CREATE";
+    static final String NODES_CREATE_UPSERT = "DOCUMENT_NODES_CREATE_UPSERT";
+    static final String NODES_CREATE_SPLIT = "DOCUMENT_NODES_CREATE_SPLIT";
+    static final String NODES_CREATE_UPSERT_TIMER = 
"DOCUMENT_NODES_CREATE_UPSERT_TIMER";
+    static final String NODES_CREATE_TIMER = "DOCUMENT_NODES_CREATE_TIMER";
+    static final String NODES_UPDATE = "DOCUMENT_NODES_UPDATE";
+    static final String NODES_UPDATE_TIMER = "DOCUMENT_NODES_UPDATE_TIMER";
+
+    static final String JOURNAL_QUERY = "DOCUMENT_JOURNAL_QUERY";
+    static final String JOURNAL_CREATE = "DOCUMENT_JOURNAL_CREATE";
+    static final String JOURNAL_QUERY_TIMER = "DOCUMENT_JOURNAL_QUERY_TIMER";
+    static final String JOURNAL_CREATE_TIMER = "DOCUMENT_JOURNAL_CREATE_TIMER";
+
+
+    private final MeterStats findNodesCachedMeter;
+    private final TimerStats findNodesMissingTimer;
+    private final MeterStats findNodesSlave;
+    private final TimerStats findNodesTimer;
+    private final MeterStats findNodesPrimary;
+    private final MeterStats queryNodesSlave;
+    private final MeterStats queryNodesPrimary;
+    private final MeterStats queryNodesResult;
+    private final TimerStats queryNodesWithFilterTimer;
+    private final TimerStats queryNodesTimer;
+    private final MeterStats queryJournal;
+    private final TimerStats queryJournalTimer;
+    private final TimerStats createNodeUpsertTimer;
+    private final TimerStats createNodeTimer;
+    private final TimerStats updateNodeTimer;
+    private final MeterStats createNodeUpsertMeter;
+    private final MeterStats createNodeMeter;
+    private final MeterStats updateNodeMeter;
+    private final MeterStats createJournal;
+    private final TimerStats createJournalTimer;
+    private final MeterStats findSplitNodes;
+    private final StatisticsProvider statisticsProvider;
+    private final MeterStats queryNodesLock;
+    private final TimerStats queryNodesLockTimer;
+    private final MeterStats createSplitNodeMeter;
+
+    public DocumentStoreStats(StatisticsProvider provider) {
+        statisticsProvider = checkNotNull(provider);
+        findNodesCachedMeter = provider.getMeter(NODES_FIND_CACHED, 
StatsOptions.DEFAULT);
+        findNodesMissingTimer = provider.getTimer(NODES_FIND_MISSING_TIMER, 
StatsOptions.METRICS_ONLY);
+        findNodesTimer = provider.getTimer(NODES_FIND_TIMER, 
StatsOptions.METRICS_ONLY);
+        findSplitNodes = provider.getMeter(NODES_FIND_SPLIT, 
StatsOptions.DEFAULT);
+
+        findNodesSlave = provider.getMeter(NODES_FIND_SLAVE, 
StatsOptions.DEFAULT);
+        findNodesPrimary = provider.getMeter(NODES_FIND_PRIMARY, 
StatsOptions.DEFAULT);
+
+        queryNodesSlave = provider.getMeter(NODES_QUERY_SLAVE, 
StatsOptions.DEFAULT);
+        queryNodesPrimary = provider.getMeter(NODES_QUERY_PRIMARY, 
StatsOptions.DEFAULT);
+        queryNodesResult = provider.getMeter(NODES_QUERY_FIND_READ_COUNT, 
StatsOptions.DEFAULT);
+
+        queryNodesWithFilterTimer = provider.getTimer(NODES_QUERY_FILTER, 
StatsOptions.METRICS_ONLY);
+        queryNodesTimer = provider.getTimer(NODES_QUERY_TIMER, 
StatsOptions.METRICS_ONLY);
+
+        queryJournal = provider.getMeter(JOURNAL_QUERY, StatsOptions.DEFAULT);
+        queryJournalTimer = provider.getTimer(JOURNAL_QUERY_TIMER, 
StatsOptions.METRICS_ONLY);
+        createJournal = provider.getMeter(JOURNAL_CREATE, 
StatsOptions.DEFAULT);
+        createJournalTimer = provider.getTimer(JOURNAL_CREATE_TIMER, 
StatsOptions.METRICS_ONLY);
+
+        createNodeUpsertTimer = provider.getTimer(NODES_CREATE_UPSERT_TIMER, 
StatsOptions.METRICS_ONLY);
+        createNodeTimer = provider.getTimer(NODES_CREATE_TIMER, 
StatsOptions.METRICS_ONLY);
+        updateNodeTimer = provider.getTimer(NODES_UPDATE_TIMER, 
StatsOptions.METRICS_ONLY);
+        createNodeMeter = provider.getMeter(NODES_CREATE, 
StatsOptions.DEFAULT);
+        createNodeUpsertMeter = provider.getMeter(NODES_CREATE_UPSERT, 
StatsOptions.DEFAULT);
+        createSplitNodeMeter = provider.getMeter(NODES_CREATE_SPLIT, 
StatsOptions.DEFAULT);
+        updateNodeMeter = provider.getMeter(NODES_UPDATE, 
StatsOptions.DEFAULT);
+
+        queryNodesLock = provider.getMeter(NODES_QUERY_LOCK, 
StatsOptions.DEFAULT);
+        queryNodesLockTimer = provider.getTimer(NODES_QUERY_LOCK_TIMER, 
StatsOptions.METRICS_ONLY);
+    }
+
+    //~------------------------------------------< DocumentStoreStatsCollector 
>
+
+    @Override
+    public void doneFindCached(Collection collection, String key) {
+        //findCached call is almost done for NODES collection only
+        if (collection == Collection.NODES){
+            findNodesCachedMeter.mark();
+        }
+    }
+
+    @Override
+    public void doneFindUncached(long timeTakenNanos, Collection collection, 
String key,
+                                 boolean docFound, boolean isSlaveOk) {
+        if (collection == Collection.NODES){
+            //For now collect time for reads from primary/secondary in same 
timer
+            TimerStats timer = docFound ? findNodesTimer : 
findNodesMissingTimer;
+            timer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+
+            //For now only nodes can be looked up from slave
+            if (isSlaveOk){
+                findNodesSlave.mark();
+            } else {
+                findNodesPrimary.mark();
+            }
+
+            if (Utils.isPreviousDocId(key)){
+                findSplitNodes.mark();
+            }
+        }
+
+        perfLog(timeTakenNanos, "findUncached on key={}, isSlaveOk={}", key, 
isSlaveOk);
+    }
+
+    @Override
+    public void doneQuery(long timeTakenNanos, Collection collection, String 
fromKey, String toKey,
+                          String indexedProperty, int resultSize, long 
lockTime, boolean isSlaveOk) {
+        if (collection == Collection.NODES){
+            //Distinguish between query done with filter and without filter
+            TimerStats timer = indexedProperty != null ? 
queryNodesWithFilterTimer : queryNodesTimer;
+            timer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+
+            //Number of nodes read
+            queryNodesResult.mark(resultSize);
+
+            //Stats for queries to slaves
+            if (isSlaveOk){
+                queryNodesSlave.mark();
+            } else {
+                queryNodesPrimary.mark();
+            }
+
+            if (lockTime > 0){
+                queryNodesLock.mark();
+                queryNodesLockTimer.update(lockTime, TimeUnit.NANOSECONDS);
+            }
+
+            //TODO What more to gather
+            // - Histogram of result - How the number of children vary
+        } else if (collection == Collection.JOURNAL){
+            //Journals are read from primary and without any extra condition 
on indexedProperty
+            queryJournal.mark(resultSize);
+            queryJournalTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+        }
+        perfLog(timeTakenNanos, "query for children from [{}] to [{}], 
lock:{}", fromKey, toKey, lockTime);
+    }
+
+    @Override
+    public void doneCreate(long timeTakenNanos, Collection collection, 
List<String> ids, boolean insertSuccess) {
+        if (collection == Collection.NODES && insertSuccess){
+            for (String id : ids){
+                createNodeMeter.mark();
+                if (Utils.isPreviousDocId(id)){
+                    createSplitNodeMeter.mark();
+                }
+            }
+            createNodeTimer.update(timeTakenNanos / ids.size(), 
TimeUnit.NANOSECONDS);
+        } else if (collection == Collection.JOURNAL){
+            createJournal.mark(ids.size());
+            createJournalTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+        }
+        perfLog(timeTakenNanos, "create");
+    }
+
+    @Override
+    public void doneUpdate(long timeTakenNanos, Collection collection, int 
updateCount) {
+        //NODES - Update is called for lastRev update
+        perfLog(timeTakenNanos, "update");
+    }
+
+    @Override
+    public void doneFindAndModify(long timeTakenNanos, Collection collection, 
String key, boolean newEntry) {
+        if (collection == Collection.NODES){
+            if (newEntry){
+                createNodeUpsertMeter.mark();
+                createNodeUpsertTimer.update(timeTakenNanos, 
TimeUnit.NANOSECONDS);
+            } else {
+                updateNodeMeter.mark();
+                updateNodeTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+            }
+        }
+        perfLog(timeTakenNanos, "findAndModify [{}]", key);
+    }
+
+    private void perfLog(long timeTakenNanos, String logMessagePrefix, 
Object... arguments){
+        if (!perfLog.isDebugEnabled()){
+            return;
+        }
+
+        final long diff = TimeUnit.NANOSECONDS.toMillis(timeTakenNanos);
+        if (perfLog.isTraceEnabled()) {
+            // if log level is TRACE, then always log - and do that on TRACE
+            // then:
+            perfLog.trace(logMessagePrefix + " [took " + diff + "ms]",
+                    (Object[]) arguments);
+        } else if (diff > PERF_LOG_THRESHOLD) {
+            perfLog.debug(logMessagePrefix + " [took " + diff + "ms]",
+                    (Object[]) arguments);
+        }
+    }
+
+    //~--------------------------------------------< DocumentStoreStatsMBean >
+
+    @Override
+    public long getNodesFindCount() {
+        return findNodesSlave.getCount() + queryNodesPrimary.getCount();
+    }
+
+    @Override
+    public long getNodesFindQueryCount() {
+        return queryNodesSlave.getCount() + queryNodesPrimary.getCount();
+    }
+
+    @Override
+    public long getNodesReadByQueryCount() {
+        return queryNodesResult.getCount();
+    }
+
+    @Override
+    public long getNodesCreateCount() {
+        return createNodeMeter.getCount() + createNodeUpsertMeter.getCount();
+    }
+
+    @Override
+    public long getNodesUpdateCount() {
+        return updateNodeMeter.getCount();
+    }
+
+    @Override
+    public long getJournalCreateCount() {
+        return createJournal.getCount();
+    }
+
+    @Override
+    public long getJournalReadCount() {
+        return queryJournal.getCount();
+    }
+
+    @Override
+    public CompositeData getFindCachedNodesHistory() {
+        return getTimeSeriesData(NODES_FIND_CACHED, NODES_FIND_CACHED);
+    }
+
+    @Override
+    public CompositeData getFindSplitNodesHistory() {
+        return getTimeSeriesData(NODES_FIND_SPLIT, NODES_FIND_SPLIT);
+    }
+
+    @Override
+    public CompositeData getFindNodesFromPrimaryHistory() {
+        return getTimeSeriesData(NODES_FIND_PRIMARY, NODES_FIND_PRIMARY);
+    }
+
+    @Override
+    public CompositeData getFindNodesFromSlaveHistory() {
+        return getTimeSeriesData(NODES_FIND_SLAVE, NODES_FIND_SLAVE);
+    }
+
+    @Override
+    public CompositeData getQueryNodesFromSlaveHistory() {
+        return getTimeSeriesData(NODES_QUERY_SLAVE, NODES_QUERY_SLAVE);
+    }
+
+    @Override
+    public CompositeData getQueryNodesFromPrimaryHistory() {
+        return getTimeSeriesData(NODES_QUERY_PRIMARY, NODES_QUERY_PRIMARY);
+    }
+
+    @Override
+    public CompositeData getQueryNodesLockHistory() {
+        return getTimeSeriesData(NODES_QUERY_LOCK, NODES_QUERY_LOCK);
+    }
+
+    @Override
+    public CompositeData getQueryJournalHistory() {
+        return getTimeSeriesData(JOURNAL_QUERY, JOURNAL_QUERY);
+    }
+
+    @Override
+    public CompositeData getCreateJournalHistory() {
+        return getTimeSeriesData(JOURNAL_CREATE, JOURNAL_CREATE);
+    }
+
+    @Override
+    public CompositeData getCreateNodesHistory() {
+        return getTimeSeriesData(NODES_CREATE, NODES_CREATE);
+    }
+
+    @Override
+    public CompositeData getUpdateNodesHistory() {
+        return getTimeSeriesData(NODES_UPDATE, NODES_UPDATE);
+    }
+
+    private CompositeData getTimeSeriesData(String name, String desc){
+        return TimeSeriesStatsUtil.asCompositeData(getTimeSeries(name), desc);
+    }
+
+    private TimeSeries getTimeSeries(String name) {
+        return statisticsProvider.getStats().getTimeSeries(name, true);
+    }
+}

Propchange: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java?rev=1724733&view=auto
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
 (added)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
 Fri Jan 15 06:21:17 2016
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document;
+
+import javax.management.openmbean.CompositeData;
+
+import aQute.bnd.annotation.ProviderType;
+
/**
 * JMX MBean exposing DocumentStore operation statistics: aggregate counts
 * for find/query/create/update calls on the NODES and JOURNAL collections,
 * and per-metric time series as {@link CompositeData}.
 */
@ProviderType
public interface DocumentStoreStatsMBean {

    /** MBean type name used for registration. */
    String TYPE = "DocumentStoreStats";

    /** Total uncached NODES find calls (primary + slave). */
    long getNodesFindCount();

    /** Total NODES query calls (primary + slave). */
    long getNodesFindQueryCount();

    /** Total number of node documents returned by queries. */
    long getNodesReadByQueryCount();

    /** Total node documents created (direct creates + upserts). */
    long getNodesCreateCount();

    /** Total node documents updated via findAndModify. */
    long getNodesUpdateCount();

    /** Total journal entries created. */
    long getJournalCreateCount();

    /** Total journal entries read by queries. */
    long getJournalReadCount();

    CompositeData getFindCachedNodesHistory();

    CompositeData getFindSplitNodesHistory();

    CompositeData getFindNodesFromPrimaryHistory();

    CompositeData getFindNodesFromSlaveHistory();

    CompositeData getQueryNodesFromSlaveHistory();

    CompositeData getQueryNodesFromPrimaryHistory();

    CompositeData getQueryNodesLockHistory();

    CompositeData getQueryJournalHistory();

    CompositeData getCreateJournalHistory();

    CompositeData getCreateNodesHistory();

    CompositeData getUpdateNodesHistory();
}

Propchange: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java?rev=1724733&r1=1724732&r2=1724733&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
 (original)
+++ 
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
 Fri Jan 15 06:21:17 2016
@@ -37,6 +37,7 @@ import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.UncheckedExecutionException;
@@ -52,6 +53,7 @@ import org.apache.jackrabbit.oak.plugins
 import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
 import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.DocumentStoreException;
+import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsCollector;
 import org.apache.jackrabbit.oak.plugins.document.JournalEntry;
 import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
 import org.apache.jackrabbit.oak.plugins.document.Revision;
@@ -164,6 +166,8 @@ public class MongoDocumentStore implemen
 
     private final Map<String, String> metadata;
 
+    private final DocumentStoreStatsCollector stats;
+
     public MongoDocumentStore(DB db, DocumentMK.Builder builder) {
         String version = checkVersion(db);
         metadata = ImmutableMap.<String,String>builder()
@@ -172,6 +176,7 @@ public class MongoDocumentStore implemen
                 .build();
 
         this.db = db;
+        stats = builder.getDocumentStoreStatsCollector();
         nodes = db.getCollection(Collection.NODES.toString());
         clusterNodes = db.getCollection(Collection.CLUSTER_NODES.toString());
         settings = db.getCollection(Collection.SETTINGS.toString());
@@ -341,6 +346,7 @@ public class MongoDocumentStore implemen
             if (doc != null) {
                 if (preferCached ||
                         getTime() - doc.getCreated() < maxCacheAge) {
+                    stats.doneFindCached(collection, key);
                     if (doc == NodeDocument.NULL) {
                         return null;
                     }
@@ -359,6 +365,7 @@ public class MongoDocumentStore implemen
                     if (doc != null) {
                         if (preferCached ||
                                 getTime() - doc.getCreated() < maxCacheAge) {
+                            stats.doneFindCached(collection, key);
                             if (doc == NodeDocument.NULL) {
                                 return null;
                             }
@@ -438,8 +445,9 @@ public class MongoDocumentStore implemen
     protected <T extends Document> T findUncached(Collection<T> collection, 
String key, DocumentReadPreference docReadPref) {
         log("findUncached", key, docReadPref);
         DBCollection dbCollection = getDBCollection(collection);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
         boolean isSlaveOk = false;
+        boolean docFound = true;
         try {
             ReadPreference readPreference = getMongoReadPreference(collection, 
Utils.getParentId(key), docReadPref);
 
@@ -462,6 +470,7 @@ public class MongoDocumentStore implemen
                 obj = dbCollection.findOne(getByKeyQuery(key).get(), null, 
null, ReadPreference.primary());
             }
             if(obj == null){
+                docFound = false;
                 return null;
             }
             T doc = convertFromDBObject(collection, obj);
@@ -470,8 +479,7 @@ public class MongoDocumentStore implemen
             }
             return doc;
         } finally {
-            PERFLOG.end(start, 1, "findUncached on key={}, isSlaveOk={}", key,
-                    isSlaveOk);
+            stats.doneFindUncached(watch.elapsed(TimeUnit.NANOSECONDS), 
collection, key, docFound, isSlaveOk);
         }
     }
 
@@ -552,12 +560,12 @@ public class MongoDocumentStore implemen
         DBObject query = queryBuilder.get();
         String parentId = Utils.getParentIdFromLowerLimit(fromKey);
         long lockTime = -1;
-        final long start = PERFLOG.start();
+        final Stopwatch watch  = startWatch();
         Lock lock = withLock ? nodeLocks.acquireExclusive(parentId != null ? 
parentId : "") : null;
+        boolean isSlaveOk = false;
+        int resultSize = 0;
         try {
-            if (start != -1) {
-                lockTime = System.currentTimeMillis() - start;
-            }
+            lockTime = withLock ? watch.elapsed(TimeUnit.MILLISECONDS) : -1;
             DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
             if (!disableIndexHint) {
                 cursor.hint(hint);
@@ -570,6 +578,7 @@ public class MongoDocumentStore implemen
                     getMongoReadPreference(collection, parentId, 
getDefaultReadPreference(collection));
 
             if(readPreference.isSlaveOk()){
+                isSlaveOk = true;
                 LOG.trace("Routing call to secondary for fetching children 
from [{}] to [{}]", fromKey, toKey);
             }
 
@@ -588,6 +597,7 @@ public class MongoDocumentStore implemen
                     }
                     list.add(doc);
                 }
+                resultSize = list.size();
             } finally {
                 cursor.close();
             }
@@ -596,7 +606,8 @@ public class MongoDocumentStore implemen
             if (lock != null) {
                 lock.unlock();
             }
-            PERFLOG.end(start, 1, "query for children from [{}] to [{}], 
lock:{}", fromKey, toKey, lockTime);
+            stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, 
fromKey, toKey,
+                    indexedProperty, resultSize, lockTime, isSlaveOk);
         }
     }
 
@@ -701,7 +712,8 @@ public class MongoDocumentStore implemen
         if (collection == Collection.NODES) {
             lock = nodeLocks.acquire(updateOp.getId());
         }
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
+        boolean newEntry = false;
         try {
             // get modCount of cached document
             Long modCount = null;
@@ -737,6 +749,11 @@ public class MongoDocumentStore implemen
             // perform operation and get complete document
             QueryBuilder query = createQueryForUpdate(updateOp.getId(), 
updateOp.getConditions());
             DBObject oldNode = dbCollection.findAndModify(query.get(), null, 
null /*sort*/, false /*remove*/, update, false /*returnNew*/, upsert);
+
+            if (oldNode == null){
+                newEntry = true;
+            }
+
             if (checkConditions && oldNode == null) {
                 return null;
             }
@@ -764,7 +781,7 @@ public class MongoDocumentStore implemen
             if (lock != null) {
                 lock.unlock();
             }
-            PERFLOG.end(start, 1, "findAndModify [{}]", updateOp.getId());
+            stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), 
collection, updateOp.getId(), newEntry);
         }
     }
 
@@ -802,6 +819,7 @@ public class MongoDocumentStore implemen
         log("create", updateOps);
         List<T> docs = new ArrayList<T>();
         DBObject[] inserts = new DBObject[updateOps.size()];
+        List<String> ids = Lists.newArrayListWithCapacity(updateOps.size());
 
         for (int i = 0; i < updateOps.size(); i++) {
             inserts[i] = new BasicDBObject();
@@ -810,6 +828,7 @@ public class MongoDocumentStore implemen
             T target = collection.newDocument(this);
             UpdateUtils.applyChanges(target, update);
             docs.add(target);
+            ids.add(updateOps.get(i).getId());
             for (Entry<Key, Operation> entry : update.getChanges().entrySet()) 
{
                 Key k = entry.getKey();
                 Operation op = entry.getValue();
@@ -853,7 +872,8 @@ public class MongoDocumentStore implemen
         }
 
         DBCollection dbCollection = getDBCollection(collection);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
+        boolean insertSuccess = false;
         try {
             try {
                 dbCollection.insert(inserts);
@@ -862,12 +882,13 @@ public class MongoDocumentStore implemen
                         nodesCache.putIfAbsent((NodeDocument) doc);
                     }
                 }
+                insertSuccess = true;
                 return true;
             } catch (MongoException e) {
                 return false;
             }
         } finally {
-            PERFLOG.end(start, 1, "create");
+            stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, 
ids, insertSuccess);
         }
     }
 
@@ -882,7 +903,7 @@ public class MongoDocumentStore implemen
         // make sure we don't modify the original updateOp
         updateOp = updateOp.copy();
         DBObject update = createUpdate(updateOp);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
         try {
             Map<String, NodeDocument> cachedDocs = Collections.emptyMap();
             if (collection == Collection.NODES) {
@@ -929,7 +950,7 @@ public class MongoDocumentStore implemen
                 throw DocumentStoreException.convert(e);
             }
         } finally {
-            PERFLOG.end(start, 1, "update");
+            stats.doneUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, 
keys.size());
         }
     }
 
@@ -1220,6 +1241,11 @@ public class MongoDocumentStore implemen
         return doc;
     }
 
+    /**
+     * Creates and starts a {@link Stopwatch} used to time a single store
+     * operation; the elapsed nanos are reported to DocumentStoreStats in
+     * the callers' finally blocks.
+     */
+    private Stopwatch startWatch() {
+        return Stopwatch.createStarted();
+    }
+
+
     @Override
     public void setReadWriteMode(String readWriteMode) {
         if (readWriteMode == null || readWriteMode.equals(lastReadWriteMode)) {

Added: 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java?rev=1724733&view=auto
==============================================================================
--- 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java
 (added)
+++ 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java
 Fri Jan 15 06:21:17 2016
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document;
+
+import java.lang.management.ManagementFactory;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.LoggerContext;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.Timer;
+import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
+import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;
+import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.collect.ImmutableList.of;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for DocumentStoreStats (OAK-3819): verifies that the various
+ * done* callbacks update the expected Metrics meters/timers and that the
+ * perf logger only emits entries according to the configured log level.
+ */
+public class DocumentStoreStatsTest {
+    private ScheduledExecutorService executor = 
Executors.newSingleThreadScheduledExecutor();
+    private MetricStatisticsProvider statsProvider =
+            new 
MetricStatisticsProvider(ManagementFactory.getPlatformMBeanServer(),executor);
+    private DocumentStoreStats stats = new DocumentStoreStats(statsProvider);
+
+    @After
+    public void shutDown(){
+        statsProvider.close();
+        new ExecutorCloser(executor).close();
+    }
+
+    @Test
+    public void doneFindCached() throws Exception{
+        stats.doneFindCached(Collection.NODES, "foo");
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_CACHED).getCount());
+
+        //A cached find on another collection must not tick the NODES meter
+        stats.doneFindCached(Collection.SETTINGS, "foo");
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_CACHED).getCount());
+    }
+
+    @Test
+    public void doneFindUncached() throws Exception{
+        //Final flag false -> primary meter ticks, slave meter stays at 0
+        stats.doneFindUncached(100, Collection.NODES, "0:/", true, false);
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_PRIMARY).getCount());
+        assertEquals(100, 
getTimer(DocumentStoreStats.NODES_FIND_TIMER).getSnapshot().getMax());
+        assertEquals(0, 
getMeter(DocumentStoreStats.NODES_FIND_SLAVE).getCount());
+
+        //Final flag true -> counted against the slave meter instead
+        stats.doneFindUncached(100, Collection.NODES, "0:/", true, true);
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_PRIMARY).getCount());
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_SLAVE).getCount());
+
+        //"2:p/foo" also ticks the split meter - presumably detected from
+        //the previous-doc style id; confirm against DocumentStoreStats
+        stats.doneFindUncached(100, Collection.NODES, "2:p/foo", true, false);
+        assertEquals(2, 
getMeter(DocumentStoreStats.NODES_FIND_PRIMARY).getCount());
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_FIND_SPLIT).getCount());
+
+        stats.doneFindUncached(100, Collection.SETTINGS, "0:/", true, false);
+        //Nothing changes for the other collection
+        assertEquals(2, 
getMeter(DocumentStoreStats.NODES_FIND_PRIMARY).getCount());
+    }
+
+    @Test
+    public void doneQuery_Nodes() throws Exception{
+        stats.doneQuery(100, Collection.NODES, "foo", "bar", null, 5, -1, 
false);
+        assertEquals(5, 
getMeter(DocumentStoreStats.NODES_QUERY_FIND_READ_COUNT).getCount());
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_QUERY_PRIMARY).getCount());
+
+        //Read count accumulates across queries: 5 + 7 = 12
+        stats.doneQuery(100, Collection.NODES, "foo", "bar", null, 7, -1, 
true);
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_QUERY_SLAVE).getCount());
+        assertEquals(12, 
getMeter(DocumentStoreStats.NODES_QUERY_FIND_READ_COUNT).getCount());
+
+        //Passing 1000 (vs -1) in the lock-time slot ticks the lock meter -
+        //argument semantics assumed from the meter name, confirm in
+        //DocumentStoreStats
+        stats.doneQuery(100, Collection.NODES, "foo", "bar", null, 7, 1000, 
false);
+        assertEquals(2, 
getMeter(DocumentStoreStats.NODES_QUERY_PRIMARY).getCount());
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_QUERY_LOCK).getCount());
+    }
+
+    @Test
+    public void doneQuery_Journal() throws Exception{
+        //Journal queries use their own meter/timer, independent of NODES
+        stats.doneQuery(100, Collection.JOURNAL, "foo", "bar", null, 5, -1, 
false);
+        assertEquals(5, getMeter(DocumentStoreStats.JOURNAL_QUERY).getCount());
+        assertEquals(1, 
getTimer(DocumentStoreStats.JOURNAL_QUERY_TIMER).getCount());
+    }
+
+    @Test
+    public void doneCreate_Journal() throws Exception{
+        //Meter counts created ids (2 here), timer records the elapsed nanos
+        stats.doneCreate(100, Collection.JOURNAL, of("a", "b"), true);
+        assertEquals(2, 
getMeter(DocumentStoreStats.JOURNAL_CREATE).getCount());
+        assertEquals(100, 
getTimer(DocumentStoreStats.JOURNAL_CREATE_TIMER).getSnapshot().getMax());
+    }
+
+    @Test
+    public void doneFindAndModify() throws Exception{
+        //newEntry=true -> the findAndModify created the doc (upsert)
+        stats.doneFindAndModify(100, Collection.NODES, "foo", true);
+        assertEquals(1, 
getMeter(DocumentStoreStats.NODES_CREATE_UPSERT).getCount());
+        assertEquals(100, 
getTimer(DocumentStoreStats.NODES_CREATE_UPSERT_TIMER).getSnapshot().getMax());
+
+        //newEntry=false -> an existing doc was updated
+        stats.doneFindAndModify(100, Collection.NODES, "foo", false);
+        assertEquals(1, getMeter(DocumentStoreStats.NODES_UPDATE).getCount());
+        assertEquals(100, 
getTimer(DocumentStoreStats.NODES_UPDATE_TIMER).getSnapshot().getMax());
+    }
+
+    @Test
+    public void perfLog() throws Exception{
+        String logName = DocumentStoreStats.class.getName() + ".perf";
+        LogCustomizer customLogs = LogCustomizer.forLogger(logName)
+                .filter(Level.TRACE)
+                .create();
+
+        enableLevel(logName, Level.INFO);
+        customLogs.starting();
+
+        //No logs until debug is enabled - even a slow (10s) call is silent
+        stats.doneFindAndModify(100, Collection.NODES, "foo", true);
+        assertEquals(0, customLogs.getLogs().size());
+
+        stats.doneFindAndModify(TimeUnit.SECONDS.toNanos(10), 
Collection.NODES, "foo", true);
+        assertEquals(0, customLogs.getLogs().size());
+
+        //Change level to DEBUG - Now threshold rule applies
+        enableLevel(logName, Level.DEBUG);
+
+        //Fast call (100ns) stays below the threshold - not logged
+        stats.doneFindAndModify(100, Collection.NODES, "foo", true);
+        assertEquals(0, customLogs.getLogs().size());
+
+        //Slow call (10s) crosses the threshold - logged once
+        stats.doneFindAndModify(TimeUnit.SECONDS.toNanos(10), 
Collection.NODES, "foo", true);
+        assertEquals(1, customLogs.getLogs().size());
+
+        //With trace level everything is logged
+        enableLevel(logName, Level.TRACE);
+        stats.doneFindAndModify(100, Collection.NODES, "foo", true);
+        assertEquals(2, customLogs.getLogs().size());
+
+        customLogs.finished();
+    }
+
+
+    //Looks up the Meter registered by DocumentStoreStats under the given name
+    private Meter getMeter(String name) {
+        return statsProvider.getRegistry().getMeters().get(name);
+    }
+
+    //Looks up the Timer registered by DocumentStoreStats under the given name
+    private Timer getTimer(String name) {
+        return statsProvider.getRegistry().getTimers().get(name);
+    }
+
+    //Adjusts the logback level of the perf logger at runtime
+    private static void enableLevel(String logName, Level level){
+        ((LoggerContext)LoggerFactory.getILoggerFactory())
+                .getLogger(logName).setLevel(level);
+    }
+}
\ No newline at end of file

Propchange: 
jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsTest.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: 
jackrabbit/oak/trunk/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/DocumentNodeStoreConfigTest.groovy
URL: 
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/DocumentNodeStoreConfigTest.groovy?rev=1724733&r1=1724732&r2=1724733&view=diff
==============================================================================
--- 
jackrabbit/oak/trunk/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/DocumentNodeStoreConfigTest.groovy
 (original)
+++ 
jackrabbit/oak/trunk/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/DocumentNodeStoreConfigTest.groovy
 Fri Jan 15 06:21:17 2016
@@ -24,12 +24,16 @@ import org.apache.jackrabbit.oak.api.Blo
 import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean
 import org.apache.jackrabbit.oak.plugins.blob.CachingBlobStore
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore
+import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsMBean
 import org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobStore
 import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection
 import org.apache.jackrabbit.oak.spi.blob.BlobStore
 import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore
 import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore
 import org.apache.jackrabbit.oak.spi.blob.stats.BlobStoreStatsMBean
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder
 import org.apache.jackrabbit.oak.spi.state.NodeStore
 import org.h2.jdbcx.JdbcDataSource
 import org.junit.After
@@ -255,10 +259,22 @@ class DocumentNodeStoreConfigTest extend
                 "be registered by DocumentNodeStoreService in default 
blobStore used"
 
         testBlobStoreStats(ns)
+        testDocumentStoreStats(ns)
     }
 
+    //Verifies that a DocumentStoreStatsMBean is registered as an OSGi
+    //service and that creating nodes via merge() increases nodesCreateCount
+    //(>= 2 because two children are added; the merge may create more docs)
+    private void testDocumentStoreStats(DocumentNodeStore store) {
+        DocumentStoreStatsMBean stats = 
getService(DocumentStoreStatsMBean.class)

-    public void testBlobStoreStats(DocumentNodeStore nodeStore) throws 
Exception{
+        long createdNodeCount = stats.nodesCreateCount
+        NodeBuilder builder = store.getRoot().builder()
+        builder.child("testDocumentStoreStats").child("a")
+        store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        assert stats.nodesCreateCount - createdNodeCount >= 2
+
+    }
+
+    private void testBlobStoreStats(DocumentNodeStore nodeStore) throws 
Exception{
         int size = 1024 * 1024 * 5
         Blob blob = nodeStore.createBlob(testStream(size));
         BlobStoreStatsMBean stats = getService(BlobStoreStatsMBean.class)



Reply via email to