Author: reschke
Date: Thu Nov 19 14:58:49 2015
New Revision: 1715191
URL: http://svn.apache.org/viewvc?rev=1715191&view=rev
Log:
OAK-3657: RDBDocumentStore: only use cache update logic introduced for OAK-3566
for NODES collection
Modified:
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
Modified:
jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java?rev=1715191&r1=1715190&r2=1715191&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java Thu Nov 19 14:58:49 2015
@@ -1051,20 +1051,24 @@ public class RDBDocumentStore implements
String appendData = ser.asString(update);
for (List<String> chunkedIds : Lists.partition(ids, CHUNKSIZE)) {
- // remember what we already have in the cache
- Set<QueryContext> seenQueryContext = new HashSet<QueryContext>();
+
+ Set<QueryContext> seenQueryContext = Collections.emptySet();
Map<String, NodeDocument> cachedDocs = Collections.emptyMap();
- // keep concurrently running queries from updating
- // the cache entry for this key
- for (QueryContext qc : qmap.values()) {
- qc.addKeys(chunkedIds);
- seenQueryContext.add(qc);
- }
+
if (collection == Collection.NODES) {
+ // remember what we already have in the cache
cachedDocs = new HashMap<String, NodeDocument>();
for (String key : chunkedIds) {
cachedDocs.put(key, nodesCache.getIfPresent(new StringValue(key)));
}
+
+ // keep concurrently running queries from updating
+ // the cache entry for this key
+ seenQueryContext = new HashSet<QueryContext>();
+ for (QueryContext qc : qmap.values()) {
+ qc.addKeys(chunkedIds);
+ seenQueryContext.add(qc);
+ }
}
Connection connection = null;
@@ -1081,11 +1085,13 @@ public class RDBDocumentStore implements
this.ch.closeConnection(connection);
}
if (success) {
- // keep concurrently running queries from updating
- // the cache entry for this key
- for (QueryContext qc : qmap.values()) {
- if (!seenQueryContext.contains(qc)) {
- qc.addKeys(chunkedIds);
+ if (collection == Collection.NODES) {
+ // keep concurrently running queries from updating
+ // the cache entry for this key
+ for (QueryContext qc : qmap.values()) {
+ if (!seenQueryContext.contains(qc)) {
+ qc.addKeys(chunkedIds);
+ }
}
}
for (Entry<String, NodeDocument> entry : cachedDocs.entrySet()) {
@@ -1197,8 +1203,11 @@ public class RDBDocumentStore implements
}
try {
long now = System.currentTimeMillis();
- QueryContext qp = new QueryContext(fromKey, toKey);
- qmap.put(Thread.currentThread(), qp);
+ QueryContext qp = null;
+ if (collection == Collection.NODES) {
+ qp = new QueryContext(fromKey, toKey);
+ qmap.put(Thread.currentThread(), qp);
+ }
connection = this.ch.getROConnection();
String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
@@ -1212,7 +1221,9 @@ public class RDBDocumentStore implements
T doc = runThroughCache(collection, row, now, qp);
result.add(doc);
}
- qp.dispose();
+ if (qp != null) {
+ qp.dispose();
+ }
return result;
} catch (Exception ex) {
LOG.error("SQL exception on query", ex);