Author: reschke
Date: Wed Dec 6 17:54:14 2017
New Revision: 1817311
URL: http://svn.apache.org/viewvc?rev=1817311&view=rev
Log:
OAK-6965: RDBDocumentStore: allow schema evolution part 5: add rows for performant VGC
Modified:
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializer.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreDB.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreJDBC.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBExport.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBHelper.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBRow.java
jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBVersionGCSupport.java
jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializerTest.java
jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreSchemaUpgradeTest.java
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializer.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializer.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializer.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializer.java Wed Dec 6 17:54:14 2017
@@ -55,9 +55,11 @@ public class RDBDocumentSerializer {
private final DocumentStore store;
- private static final String MODIFIED = "_modified";
- private static final String MODCOUNT = "_modCount";
+ private static final String MODIFIED = NodeDocument.MODIFIED_IN_SECS;
+ private static final String MODCOUNT = NodeDocument.MOD_COUNT;
private static final String CMODCOUNT = "_collisionsModCount";
+ private static final String SDTYPE = NodeDocument.SD_TYPE;
+ private static final String SDMAXREVTIME = NodeDocument.SD_MAX_REV_TIME_IN_SECS;
private static final String ID = "_id";
private static final String HASBINARY = NodeDocument.HAS_BINARY_FLAG;
private static final String DELETEDONCE = NodeDocument.DELETED_ONCE;
@@ -164,6 +166,14 @@ public class RDBDocumentSerializer {
if (row.deletedOnce() != null) {
doc.put(DELETEDONCE, row.deletedOnce().booleanValue());
}
+ if (row.getSchemaVersion() >= 2) {
+ if (row.getSdType() != RDBRow.LONG_UNSET) {
+ doc.put(SDTYPE, row.getSdType());
+ }
+ if (row.getSdMaxRevTime() != RDBRow.LONG_UNSET) {
+ doc.put(SDMAXREVTIME, row.getSdMaxRevTime());
+ }
+ }
byte[] bdata = row.getBdata();
boolean blobInUse = false;
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java Wed Dec 6 17:54:14 2017
@@ -133,54 +133,63 @@ import com.google.common.collect.Sets;
* <tr>
* <th>ID</th>
* <td>varchar(512) not null primary key</td>
- * <td>the document's key (for databases that can not handle 512 character
- * primary keys, such as MySQL, varbinary is possible as wells)</td>
+ * <td>The document's key (for databases that can not handle 512 character
+ * primary keys, such as MySQL, varbinary is possible as well).</td>
* </tr>
* <tr>
* <th>MODIFIED</th>
* <td>bigint</td>
- * <td>low-resolution timestamp
+ * <td>Low-resolution timestamp.
* </tr>
* <tr>
* <th>HASBINARY</th>
* <td>smallint</td>
- * <td>flag indicating whether the document has binary properties
+ * <td>Flag indicating whether the document has binary properties.
* </tr>
* <tr>
* <th>DELETEDONCE</th>
* <td>smallint</td>
- * <td>flag indicating whether the document has been deleted once
+ * <td>Flag indicating whether the document has been deleted once.
* </tr>
* <tr>
* <th>MODCOUNT</th>
* <td>bigint</td>
- * <td>modification counter, used for avoiding overlapping updates</td>
+ * <td>Modification counter, used for avoiding overlapping updates.</td>
* </tr>
* <tr>
* <th>DSIZE</th>
* <td>bigint</td>
- * <td>the approximate size of the document's JSON serialization (for debugging
- * purposes)</td>
+ * <td>The approximate size of the document's JSON serialization (for debugging
+ * purposes).</td>
* </tr>
* <tr>
* <th>VERSION</th>
* <td>smallint</td>
- * <td>the schema version the code writing to a row (or inserting it) was aware
+ * <td>The schema version the code writing to a row (or inserting it) was aware
* of (introduced with schema version 1). Not set for rows written by version 0
* client code.</td>
* </tr>
* <tr>
+ * <th>SDTYPE</th>
+ * <td>smallint</td>
+ * <td>Split document type.</td>
+ * </tr>
+ * <tr>
+ * <th>SDMAXREVTIME</th>
+ * <td>bigint</td>
+ * <td>Split document max revision time.</td>
+ * </tr>
+ * <tr>
* <th>DATA</th>
* <td>varchar(16384)</td>
- * <td>the document's JSON serialization (only used for small document sizes, in
+ * <td>The document's JSON serialization (only used for small document sizes, in
* which case BDATA (below) is not set), or a sequence of JSON serialized update
- * operations to be applied against the last full serialization</td>
+ * operations to be applied against the last full serialization.</td>
* </tr>
* <tr>
* <th>BDATA</th>
* <td>blob</td>
- * <td>the document's JSON serialization (usually GZIPped, only used for "large"
- * documents)</td>
+ * <td>The document's JSON serialization (usually GZIPped, only used for "large"
+ * documents).</td>
* </tr>
* </tbody>
* </table>
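For orientation, a version-2 table carries all of the columns above; spelled out as DDL it looks roughly like the following sketch (modeled on the generic dialect's getTableCreationStatement(tableName, 2) further below; concrete types and sizes vary per database):

create table NODES (ID varchar(512) not null primary key, MODIFIED bigint, HASBINARY smallint,
DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, VERSION smallint,
SDTYPE smallint, SDMAXREVTIME bigint, DATA varchar(16384), BDATA blob(1073741824))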
@@ -197,14 +206,16 @@ import com.google.common.collect.Sets;
version of the code writing to the database (upon insert and update). This is
* in preparation of future layout changes which might introduce new columns.
* <p>
- * The code deals with both version 0 and version 1 table layouts. By default,
- * it tries to create version 1 tables, and also tries to upgrade existing
- * version 0 tables to version 1.
+ * Version 2 introduces the additional columns "sdtype" and "sdmaxrevtime".
+ * <p>
+ * The code deals with version 0, version 1, and version 2 table layouts. By
+ * default, it tries to create version 2 tables, and also tries to upgrade
+ * existing version 0 and 1 tables to version 2.
* <h4>DB-specific information</h4>
* <p>
* <em>Note that the database needs to be created/configured to support all
* Unicode characters in text fields, and to collate by Unicode code point (in
- * DB2: "collate using identity", in Postgres: "C"). THIS IS NOT THE
+ * DB2: "collate using identity", in PostgreSQL: "C"). THIS IS NOT THE
* DEFAULT!</em>
* <p>
* <em>For MySQL, the database parameter "max_allowed_packet" needs to be
@@ -639,7 +650,9 @@ public class RDBDocumentStore implements
private final String name;
private boolean idIsBinary = false;
private boolean hasVersion = false;
+ private boolean hasSplitDocs = false;
private int dataLimitInOctets = 16384;
+ private Set<String> columnOnlyProperties = Collections.unmodifiableSet(COLUMNPROPERTIES);
private Set<String> columnProperties = Collections.unmodifiableSet(COLUMNPROPERTIES);
public RDBTableMetaData(String name) {
@@ -654,6 +667,10 @@ public class RDBDocumentStore implements
return this.columnProperties;
}
+ public Set<String> getColumnOnlyProperties() {
+ return this.columnOnlyProperties;
+ }
+
public String getName() {
return this.name;
}
@@ -662,6 +679,10 @@ public class RDBDocumentStore implements
return this.idIsBinary;
}
+ public boolean hasSplitDocs() {
+ return this.hasSplitDocs;
+ }
+
public boolean hasVersion() {
return this.hasVersion;
}
@@ -670,6 +691,11 @@ public class RDBDocumentStore implements
this.idIsBinary = idIsBinary;
}
+ public void setHasSplitDocs(boolean hasSplitDocs) {
+ this.hasSplitDocs = hasSplitDocs;
+ this.columnProperties = Collections.unmodifiableSet(hasSplitDocs ? COLUMNPROPERTIES2 : COLUMNPROPERTIES);
+ }
+
public void setHasVersion(boolean hasVersion) {
this.hasVersion = hasVersion;
}
@@ -795,7 +821,7 @@ public class RDBDocumentStore implements
protected static final boolean USECMODCOUNT = true;
// Database schema supported by this version
- protected static final int SCHEMA = 1;
+ protected static final int SCHEMA = 2;
private static final Key MODIFIEDKEY = new Key(MODIFIED, null);
@@ -811,9 +837,12 @@ public class RDBDocumentStore implements
private DocumentStoreStatsCollector stats;
+ // VERSION column mapping in queries used by RDBVersionGCSupport
+ public static String VERSIONPROP = "__version";
+
// set of supported indexed properties
private static final Set<String> INDEXEDPROPERTIES = new HashSet<String>(Arrays.asList(new String[] { MODIFIED,
- NodeDocument.HAS_BINARY_FLAG, NodeDocument.DELETED_ONCE }));
+ NodeDocument.HAS_BINARY_FLAG, NodeDocument.DELETED_ONCE, NodeDocument.SD_TYPE, NodeDocument.SD_MAX_REV_TIME_IN_SECS, VERSIONPROP }));
// set of required table columns
private static final Set<String> REQUIREDCOLUMNS = Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(
@@ -821,11 +850,15 @@ public class RDBDocumentStore implements
// set of optional table columns
private static final Set<String> OPTIONALCOLUMNS = Collections
- .unmodifiableSet(new HashSet<String>(Arrays.asList(new String[] { "version" })));
+ .unmodifiableSet(new HashSet<String>(Arrays.asList(new String[] { "version", "sdtype", "sdmaxrevtime" })));
// set of properties not serialized to JSON
private static final Set<String> COLUMNPROPERTIES = new HashSet<String>(Arrays.asList(
new String[] { ID, NodeDocument.HAS_BINARY_FLAG, NodeDocument.DELETED_ONCE, COLLISIONSMODCOUNT, MODIFIED, MODCOUNT }));
+ // set of properties not serialized to JSON, schema version 2
+ private static final Set<String> COLUMNPROPERTIES2 = new HashSet<String>(Arrays.asList(
+ new String[] { ID, NodeDocument.HAS_BINARY_FLAG, NodeDocument.DELETED_ONCE, COLLISIONSMODCOUNT, MODIFIED, MODCOUNT,
+ NodeDocument.SD_TYPE, NodeDocument.SD_MAX_REV_TIME_IN_SECS, VERSIONPROP }));
private final RDBDocumentSerializer ser = new RDBDocumentSerializer(this);
@@ -945,6 +978,9 @@ public class RDBDocumentStore implements
if ("version".equals(lcName)) {
tmd.setHasVersion(true);
}
+ if ("sdtype".equals(lcName)) {
+ tmd.setHasSplitDocs(true);
+ }
}
}
@@ -1072,7 +1108,7 @@ public class RDBDocumentStore implements
ResultSet checkResultSet = null;
Statement creatStatement = null;
- Statement upgradeStatement = null;
+
try {
// avoid PreparedStatement due to weird DB2 behavior (OAK-6237)
checkStatement = con.createStatement();
@@ -1086,6 +1122,7 @@ public class RDBDocumentStore implements
Set<String> requiredColumns = new HashSet<String>(REQUIREDCOLUMNS);
Set<String> unknownColumns = new HashSet<String>();
boolean hasVersionColumn = false;
+ boolean hasSDTypeColumn = false;
for (int i = 1; i <= met.getColumnCount(); i++) {
String cname = met.getColumnName(i).toLowerCase(Locale.ENGLISH);
if (!requiredColumns.remove(cname)) {
@@ -1096,6 +1133,9 @@ public class RDBDocumentStore implements
if (cname.equals("version")) {
hasVersionColumn = true;
}
+ if (cname.equals("sdtype")) {
+ hasSDTypeColumn = true;
+ }
}
if (!requiredColumns.isEmpty()) {
@@ -1124,20 +1164,11 @@ public class RDBDocumentStore implements
boolean dbWasChanged = false;
if (!hasVersionColumn && upgradeToSchema >= 1) {
- for (String upStatement1 : this.dbInfo.getTableUpgradeStatements(tableName, 1)) {
- try {
- upgradeStatement = con.createStatement();
- upgradeStatement.execute(upStatement1);
- upgradeStatement.close();
- con.commit();
- LOG.info("Upgraded " + tableName + " to DB level 1 using '" + upStatement1 + "'");
- dbWasChanged = true;
- } catch (SQLException exup) {
- con.rollback();
- LOG.info("Attempted to upgrade " + tableName + " to DB level 1 using '" + upStatement1
- + "', but failed - will continue without.", exup);
- }
- }
+ dbWasChanged |= upgradeTable(con, tableName, 1);
+ }
+
+ if (!hasSDTypeColumn && upgradeToSchema >= 2) {
+ dbWasChanged |= upgradeTable(con, tableName, 2);
}
tablesPresent.add(tableName);
@@ -1155,7 +1186,7 @@ public class RDBDocumentStore implements
creatStatement.execute(this.dbInfo.getTableCreationStatement(tableName, initialSchema));
creatStatement.close();
- for (String ic : this.dbInfo.getIndexCreationStatements(tableName)) {
+ for (String ic : this.dbInfo.getIndexCreationStatements(tableName, initialSchema)) {
creatStatement = con.createStatement();
creatStatement.execute(ic);
creatStatement.close();
@@ -1164,19 +1195,11 @@ public class RDBDocumentStore implements
con.commit();
if (initialSchema < 1 && upgradeToSchema >= 1) {
- for (String upStatement1 : this.dbInfo.getTableUpgradeStatements(tableName, 1)) {
- try {
- upgradeStatement = con.createStatement();
- upgradeStatement.execute(upStatement1);
- upgradeStatement.close();
- con.commit();
- LOG.info("Upgraded " + tableName + " to DB level 1 using '" + upStatement1 + "'");
- } catch (SQLException exup) {
- con.rollback();
- LOG.info("Attempted to upgrade " + tableName + " to DB level 1 using '" + upStatement1
- + "', but failed - will continue without.", exup);
- }
- }
+ upgradeTable(con, tableName, 1);
+ }
+
+ if (initialSchema < 2 && upgradeToSchema >= 2) {
+ upgradeTable(con, tableName, 2);
}
tablesCreated.add(tableName);
@@ -1193,12 +1216,35 @@ public class RDBDocumentStore implements
closeResultSet(checkResultSet);
closeStatement(checkStatement);
closeStatement(creatStatement);
- closeStatement(upgradeStatement);
}
overallDiagnostics.append(diagnostics);
}
+ private boolean upgradeTable(Connection con, String tableName, int level) throws SQLException {
+ boolean wasChanged = false;
+
+ for (String statement : this.dbInfo.getTableUpgradeStatements(tableName, level)) {
+ Statement upgradeStatement = null;
+ try {
+ upgradeStatement = con.createStatement();
+ upgradeStatement.execute(statement);
+ upgradeStatement.close();
+ con.commit();
+ LOG.info("Upgraded " + tableName + " to DB level " + level + " using '" + statement + "'");
+ wasChanged = true;
+ } catch (SQLException exup) {
+ con.rollback();
+ LOG.info("Attempted to upgrade " + tableName + " to DB level " + level + " using '" + statement
+ + "', but failed - will continue without.", exup);
+ } finally {
+ closeStatement(upgradeStatement);
+ }
+ }
+
+ return wasChanged;
+ }
+
private static void getTableMetaData(Connection con, Collection<? extends Document> col, RDBTableMetaData tmd,
StringBuilder diagnostics) throws SQLException {
Statement checkStatement = null;
@@ -1480,8 +1526,8 @@ public class RDBDocumentStore implements
RDBTableMetaData tmd = getTable(collection);
for (QueryCondition cond : conditions) {
if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
- String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator()
- + "'" + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
+ String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond
+ + "'; supported properties are " + INDEXEDPROPERTIES;
LOG.info(message);
throw new DocumentStoreException(message);
}
@@ -1571,12 +1617,13 @@ public class RDBDocumentStore implements
final List<String> excludeKeyPatterns, final List<QueryCondition> conditions, final int limit, final String sortBy) {
final RDBTableMetaData tmd = getTable(collection);
+ Set<String> allowedProps = Sets.intersection(INDEXEDPROPERTIES, tmd.getColumnProperties());
for (QueryCondition cond : conditions) {
- if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
- String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator()
- + "'" + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
+ if (!allowedProps.contains(cond.getPropertyName())) {
+ String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond
+ + "'; supported properties are " + allowedProps;
LOG.info(message);
- throw new DocumentStoreException(message);
+ throw new UnsupportedIndexedPropertyException(message);
}
}
@@ -1634,8 +1681,8 @@ public class RDBDocumentStore implements
final RDBTableMetaData tmd = getTable(collection);
for (QueryCondition cond : conditions) {
if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
- String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond.getOperator()
- + "'" + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
+ String message = "indexed property " + cond.getPropertyName() + " not supported, query was '" + cond
+ + "'; supported properties are " + INDEXEDPROPERTIES;
LOG.info(message);
throw new DocumentStoreException(message);
}
@@ -1803,7 +1850,7 @@ public class RDBDocumentStore implements
// every 16th update is a full rewrite
if (isAppendableUpdate(update) && modcount % 16 != 0) {
- String appendData = ser.asString(update, tmd.getColumnProperties());
+ String appendData = ser.asString(update, tmd.getColumnOnlyProperties());
if (appendData.length() < tmd.getDataLimitInOctets() / CHAR2OCTETRATIO) {
try {
Operation modOperation = update.getChanges().get(MODIFIEDKEY);
@@ -1825,7 +1872,7 @@ public class RDBDocumentStore implements
}
}
if (!success && shouldRetry) {
- data = ser.asString(document, tmd.getColumnProperties());
+ data = ser.asString(document, tmd.getColumnOnlyProperties());
Object m = document.get(MODIFIED);
long modified = (m instanceof Long) ? ((Long)m).longValue() : 0;
success = db.update(connection, tmd, document.getId(), modified, hasBinary, deletedOnce, modcount, cmodcount,
@@ -1899,7 +1946,7 @@ public class RDBDocumentStore implements
int longest = 0, longestChars = 0;
for (Document d : documents) {
- String data = ser.asString(d, tmd.getColumnProperties());
+ String data = ser.asString(d, tmd.getColumnOnlyProperties());
byte bytes[] = asBytes(data);
if (bytes.length > longest) {
longest = bytes.length;
@@ -2102,16 +2149,37 @@ public class RDBDocumentStore implements
return handleException(message, ex, collection, Collections.singleton(id));
}
+ protected class UnsupportedIndexedPropertyException extends DocumentStoreException {
+
+ private static final long serialVersionUID = -8392572622365260105L;
+
+ public UnsupportedIndexedPropertyException(String message) {
+ super(message);
+ }
+ }
+
// slightly extended query support
protected static class QueryCondition {
private final String propertyName, operator;
- private final long value;
+ private final List<? extends Object> operands;
public QueryCondition(String propertyName, String operator, long value) {
this.propertyName = propertyName;
this.operator = operator;
- this.value = value;
+ this.operands = Collections.singletonList(value);
+ }
+
+ public QueryCondition(String propertyName, String operator, List<? extends Object> values) {
+ this.propertyName = propertyName;
+ this.operator = operator;
+ this.operands = values;
+ }
+
+ public QueryCondition(String propertyName, String operator) {
+ this.propertyName = propertyName;
+ this.operator = operator;
+ this.operands = Collections.emptyList();
}
public String getPropertyName() {
@@ -2122,13 +2190,19 @@ public class RDBDocumentStore implements
return operator;
}
- public long getValue() {
- return value;
+ public List<? extends Object> getOperands() {
+ return this.operands;
}
@Override
public String toString() {
- return String.format("%s %s %d", propertyName, operator, value);
+ if (this.operands.isEmpty()) {
+ return String.format("%s %s", propertyName, operator);
+ } else if (this.operands.size() == 1) {
+ return String.format("%s %s %s", propertyName, operator, operands.get(0).toString());
+ } else {
+ return String.format("%s %s %s", propertyName, operator, operands.toString());
+ }
}
}
}
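The reworked QueryCondition is what makes the new VGC queries expressible: a condition now carries zero or more operands instead of a single long. A minimal usage sketch of the three constructors (the operand values here are illustrative, not real SplitDocType codes):

List<QueryCondition> conditions = new ArrayList<QueryCondition>();
// multi-operand, rendered as "SDTYPE in (?, ?, ?)"
conditions.add(new QueryCondition(NodeDocument.SD_TYPE, "in", Arrays.asList(10L, 20L, 30L)));
// single operand, rendered as "VERSION >= ?"
conditions.add(new QueryCondition(RDBDocumentStore.VERSIONPROP, ">=", 2));
// no operand, rendered as "SDMAXREVTIME is not null"
conditions.add(new QueryCondition(NodeDocument.SD_MAX_REV_TIME_IN_SECS, "is not null"));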
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreDB.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreDB.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreDB.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreDB.java Wed Dec 6 17:54:14 2017
@@ -26,6 +26,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -37,7 +38,7 @@ import org.apache.jackrabbit.oak.plugins
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
/**
* Defines variation in the capabilities of different RDBs.
@@ -111,7 +112,9 @@ public enum RDBDocumentStoreDB {
public String getTableCreationStatement(String tableName, int schema) {
return ("create table " + tableName
+ " (ID varchar(512) not null primary key, MODIFIED bigint, HASBINARY smallint, DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, "
- + (schema >= 1 ? "VERSION smallint, " : "") + "DATA varchar(16384), BDATA bytea)");
+ + (schema >= 1 ? "VERSION smallint, " : "")
+ + (schema >= 2 ? "SDTYPE smallint, SDMAXREVTIME bigint, " : "")
+ + "DATA varchar(16384), BDATA bytea)");
}
@Override
@@ -157,16 +160,18 @@ public enum RDBDocumentStoreDB {
public String getTableCreationStatement(String tableName, int schema) {
return "create table " + tableName
+ " (ID varchar(512) not null, MODIFIED bigint, HASBINARY smallint, DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, "
- + (schema >= 1 ? "VERSION smallint, " : "") + "DATA varchar(16384), BDATA blob(" + 1024 * 1024 * 1024 + "))";
+ + (schema >= 1 ? "VERSION smallint, " : "")
+ + (schema >= 2 ? "SDTYPE smallint, SDMAXREVTIME bigint, " : "")
+ + "DATA varchar(16384), BDATA blob(" + 1024 * 1024 * 1024 + "))";
}
@Override
- public List<String> getIndexCreationStatements(String tableName) {
+ public List<String> getIndexCreationStatements(String tableName, int schema) {
List<String> statements = new ArrayList<String>();
String pkName = tableName + "_pk";
statements.add("create unique index " + pkName + " on " + tableName + " ( ID ) cluster");
statements.add("alter table " + tableName + " add constraint " + pkName + " primary key ( ID )");
- statements.addAll(super.getIndexCreationStatements(tableName));
+ statements.addAll(super.getIndexCreationStatements(tableName, schema));
return statements;
}
@@ -238,7 +243,9 @@ public enum RDBDocumentStoreDB {
// see https://issues.apache.org/jira/browse/OAK-1914
return ("create table " + tableName
+ " (ID varchar(512) not null primary key, MODIFIED number, HASBINARY number, DELETEDONCE number, MODCOUNT number, CMODCOUNT number, DSIZE number, "
- + (schema >= 1 ? "VERSION number, " : "") + "DATA varchar(4000), BDATA blob)");
+ + (schema >= 1 ? "VERSION number, " : "")
+ + (schema >= 2 ? "SDTYPE number, SDMAXREVTIME number, " : "")
+ + "DATA varchar(4000), BDATA blob)");
}
@Override
@@ -266,6 +273,16 @@ public enum RDBDocumentStoreDB {
}
return result.toString();
}
+
+ @Override
+ public String getSmallintType() {
+ return "number";
+ }
+
+ @Override
+ public String getBigintType() {
+ return "number";
+ }
},
MYSQL("MySQL") {
@@ -284,7 +301,9 @@ public enum RDBDocumentStoreDB {
// see https://issues.apache.org/jira/browse/OAK-1913
return ("create table " + tableName
+ " (ID varbinary(512) not null primary key, MODIFIED bigint, HASBINARY smallint, DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, "
- + (schema >= 1 ? "VERSION smallint, " : "") + "DATA varchar(16000), BDATA longblob)");
+ + (schema >= 1 ? "VERSION smallint, " : "")
+ + (schema >= 2 ? "SDTYPE smallint, SDMAXREVTIME bigint, " : "")
+ + "DATA varchar(16000), BDATA longblob)");
}
@Override
@@ -356,7 +375,9 @@ public enum RDBDocumentStoreDB {
// see https://issues.apache.org/jira/browse/OAK-2395
return ("create table " + tableName
+ " (ID varbinary(512) not null primary key, MODIFIED bigint, HASBINARY smallint, DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, "
- + (schema >= 1 ? "VERSION smallint, " : "") + "DATA nvarchar(4000), BDATA varbinary(max))");
+ + (schema >= 1 ? "VERSION smallint, " : "")
+ + (schema >= 2 ? "SDTYPE smallint, SDMAXREVTIME bigint, " : "")
+ + "DATA nvarchar(4000), BDATA varbinary(max))");
}
@Override
@@ -496,33 +517,59 @@ public enum RDBDocumentStoreDB {
public String getTableCreationStatement(String tableName, int schema) {
return "create table " + tableName
+ " (ID varchar(512) not null primary key, MODIFIED bigint, HASBINARY smallint, DELETEDONCE smallint, MODCOUNT bigint, CMODCOUNT bigint, DSIZE bigint, "
- + (schema >= 1 ? "VERSION smallint, " : "") + "DATA varchar(16384), BDATA blob(" + 1024 * 1024 * 1024 + "))";
+ + (schema >= 1 ? "VERSION smallint, " : "")
+ + (schema >= 2 ? "SDTYPE smallint, SDMAXREVTIME bigint, " : "")
+ + "DATA varchar(16384), BDATA blob(" + 1024 * 1024 * 1024 + "))";
}
- public List<String> getIndexCreationStatements(String tableName) {
+ public List<String> getIndexCreationStatements(String tableName, int level) {
+ List<String> result = Lists.newArrayList();
if (CREATEINDEX.equals("modified-id")) {
- return Collections.singletonList("create index " + tableName + "_MI on " + tableName + " (MODIFIED, ID)");
+ result.add("create index " + tableName + "_MI on " + tableName + " (MODIFIED, ID)");
} else if (CREATEINDEX.equals("id-modified")) {
- return Collections.singletonList("create index " + tableName + "_MI on " + tableName + " (ID, MODIFIED)");
+ result.add("create index " + tableName + "_MI on " + tableName + " (ID, MODIFIED)");
} else if (CREATEINDEX.equals("modified")) {
- return Collections.singletonList("create index " + tableName + "_MI on " + tableName + " (MODIFIED)");
- } else {
- return Collections.emptyList();
+ result.add("create index " + tableName + "_MI on " + tableName + " (MODIFIED)");
}
+ if (level == 2) {
+ result.add("create index " + tableName + "_VSN on " + tableName + " (VERSION)");
+ result.add("create index " + tableName + "_SDT on " + tableName + " (SDTYPE)");
+ result.add("create index " + tableName + "_SDM on " + tableName + " (SDMAXREVTIME)");
+ }
+ return result;
}
public String getAdditionalDiagnostics(RDBConnectionHandler ch, String tableName) {
return "";
}
+ public String getSmallintType() {
+ return "smallint";
+ }
+
+ public String getBigintType() {
+ return "bigint";
+ }
+
/**
* Statements needed to upgrade the DB
*
* @return the table modification string
*/
public List<String> getTableUpgradeStatements(String tableName, int level) {
- Preconditions.checkArgument(level == 1, "level must be 1");
- return Collections.singletonList("alter table " + tableName + " add VERSION smallint");
+ String smallint = getSmallintType();
+ String bigint = getBigintType();
+ if (level == 1) {
+ return Collections.singletonList("alter table " + tableName + " add VERSION " + smallint);
+ } else if (level == 2) {
+ String[] statements = new String[] { "alter table " + tableName + " add SDTYPE " + smallint,
+ "alter table " + tableName + " add SDMAXREVTIME " + bigint,
+ "create index " + tableName + "_SDT on " + tableName + " (SDTYPE)",
+ "create index " + tableName + "_SDM on " + tableName + " (SDMAXREVTIME)", };
+ return Arrays.asList(statements);
+ } else {
+ throw new IllegalArgumentException("level must be 1 or 2");
+ }
}
protected String description;
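For the generic dialect the level-2 branch above yields, for a table named NODES, the following statements (dialects that override getSmallintType()/getBigintType(), such as Oracle, substitute "number"):

alter table NODES add SDTYPE smallint
alter table NODES add SDMAXREVTIME bigint
create index NODES_SDT on NODES (SDTYPE)
create index NODES_SDM on NODES (SDMAXREVTIME)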
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreJDBC.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreJDBC.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreJDBC.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreJDBC.java Wed Dec 6 17:54:14 2017
@@ -192,7 +192,10 @@ public class RDBDocumentStoreJDBC {
try {
int si = 1;
for (QueryCondition cond : conditions) {
- stmt.setLong(si++, cond.getValue());
+ if (cond.getOperands().size() != 1) {
+ throw new DocumentStoreException("unexpected condition: " + cond);
+ }
+ stmt.setLong(si++, (Long)cond.getOperands().get(0));
}
return stmt.executeUpdate();
} finally {
@@ -240,16 +243,21 @@ public class RDBDocumentStoreJDBC {
}
public <T extends Document> Set<String> insert(Connection connection, RDBTableMetaData tmd, List<T> documents) throws SQLException {
+ int actualSchema = tmd.hasSplitDocs() ? 2 : 1;
PreparedStatement stmt = connection.prepareStatement(
- "insert into " + tmd.getName() + "(ID, MODIFIED, HASBINARY, DELETEDONCE, MODCOUNT, CMODCOUNT, DSIZE, DATA, "
- + (tmd.hasVersion() ? "VERSION, " : "") + " BDATA) " + "values (?, ?, ?, ?, ?, ?, ?, ?,"
- + (tmd.hasVersion() ? (" " + SCHEMAVERSION + ", ") : "") + " ?)");
+ "insert into " + tmd.getName() + "(ID, MODIFIED, HASBINARY, DELETEDONCE, MODCOUNT, CMODCOUNT, DSIZE, "
+ + (tmd.hasVersion() ? "VERSION, " : "")
+ + (tmd.hasSplitDocs() ? "SDTYPE, SDMAXREVTIME, " : "")
+ + "DATA, BDATA) " + "values (?, ?, ?, ?, ?, ?, ?, "
+ + (tmd.hasVersion() ? (" " + actualSchema + ", ") : "")
+ + (tmd.hasSplitDocs() ? "?, ?, " : "")
+ + "?, ?)");
List<T> sortedDocs = sortDocuments(documents);
int[] results;
try {
for (T document : sortedDocs) {
- String data = this.ser.asString(document, tmd.getColumnProperties());
+ String data = this.ser.asString(document, tmd.getColumnOnlyProperties());
String id = document.getId();
Number hasBinary = (Number) document.get(NodeDocument.HAS_BINARY_FLAG);
Boolean deletedOnce = (Boolean) document.get(NodeDocument.DELETED_ONCE);
@@ -263,6 +271,10 @@ public class RDBDocumentStoreJDBC {
stmt.setObject(si++, document.get(MODCOUNT), Types.BIGINT);
stmt.setObject(si++, cmodcount == null ? Long.valueOf(0) : cmodcount, Types.BIGINT);
stmt.setObject(si++, data.length(), Types.BIGINT);
+ if (tmd.hasSplitDocs()) {
+ stmt.setObject(si++, document.get(NodeDocument.SD_TYPE));
+ stmt.setObject(si++, document.get(NodeDocument.SD_MAX_REV_TIME_IN_SECS));
+ }
if (data.length() < tmd.getDataLimitInOctets() / CHAR2OCTETRATIO) {
stmt.setString(si++, data);
stmt.setBinaryStream(si++, null, 0);
@@ -330,7 +342,7 @@ public class RDBDocumentStoreJDBC {
continue; // This is a new document. We'll deal with the inserts later.
}
- String data = this.ser.asString(document, tmd.getColumnProperties());
+ String data = this.ser.asString(document, tmd.getColumnOnlyProperties());
Number hasBinary = (Number) document.get(NodeDocument.HAS_BINARY_FLAG);
Boolean deletedOnce = (Boolean) document.get(NodeDocument.DELETED_ONCE);
Long cmodcount = (Long) document.get(COLLISIONSMODCOUNT);
@@ -408,7 +420,9 @@ public class RDBDocumentStoreJDBC {
long dataTotal = 0, bdataTotal = 0;
PreparedStatement stmt = null;
String fields;
- if (tmd.hasVersion()) {
+ if (tmd.hasSplitDocs()) {
+ fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, SDTYPE, SDMAXREVTIME, DATA, BDATA";
+ } else if (tmd.hasVersion()) {
fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, DATA, BDATA";
} else {
fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, DATA, BDATA";
@@ -432,9 +446,12 @@ public class RDBDocumentStoreJDBC {
Long hasBinary = readLongOrNullFromResultSet(rs, field++);
Boolean deletedOnce = readBooleanOrNullFromResultSet(rs, field++);
long schemaVersion = tmd.hasVersion() ? readLongFromResultSet(rs, field++) : 0;
+ long sdType = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
+ long sdMaxRevTime = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
String data = rs.getString(field++);
byte[] bdata = rs.getBytes(field++);
- result.add(new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, data, bdata));
+ result.add(new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, sdType,
+ sdMaxRevTime, data, bdata));
dataTotal += data.length();
bdataTotal += bdata == null ? 0 : bdata.length;
}
@@ -531,7 +548,9 @@ public class RDBDocumentStoreJDBC {
this.connection = ch.getROConnection();
this.tmd = tmd;
String fields;
- if (tmd.hasVersion()) {
+ if (tmd.hasSplitDocs()) {
+ fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, SDTYPE, SDMAXREVTIME, DATA, BDATA";
+ } else if (tmd.hasVersion()) {
fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, DATA, BDATA";
} else {
fields = "ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, DATA, BDATA";
@@ -583,9 +602,12 @@ public class RDBDocumentStoreJDBC {
Long hasBinary = readLongOrNullFromResultSet(this.rs, field++);
Boolean deletedOnce = readBooleanOrNullFromResultSet(this.rs, field++);
long schemaVersion = tmd.hasVersion() ? readLongFromResultSet(rs, field++) : 0;
+ long sdType = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
+ long sdMaxRevTime = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
String data = this.rs.getString(field++);
byte[] bdata = this.rs.getBytes(field++);
- return new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, data, bdata);
+ return new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, sdType,
+ sdMaxRevTime, data, bdata);
} else {
this.rs = closeResultSet(this.rs);
this.stmt = closeStatement(this.stmt);
@@ -683,7 +705,9 @@ public class RDBDocumentStoreJDBC {
setIdInStatement(tmd, stmt, si++, keyPattern);
}
for (QueryCondition cond : conditions) {
- stmt.setLong(si++, cond.getValue());
+ for (Object o : cond.getOperands()) {
+ stmt.setObject(si++, o);
+ }
}
if (limit != Integer.MAX_VALUE) {
stmt.setFetchSize(limit);
@@ -698,7 +722,9 @@ public class RDBDocumentStoreJDBC {
for (List<String> keys : Iterables.partition(allKeys, RDBJDBCTools.MAX_IN_CLAUSE)) {
PreparedStatementComponent inClause = RDBJDBCTools.createInStatement("ID", keys, tmd.isIdBinary());
StringBuilder query = new StringBuilder();
- if (tmd.hasVersion()) {
+ if (tmd.hasSplitDocs()) {
+ query.append("select ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, SDTYPE, SDMAXREVTIME, DATA, BDATA from ");
+ } else if (tmd.hasVersion()) {
query.append("select ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, DATA, BDATA from ");
} else {
query.append("select ID, MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, DATA, BDATA from ");
@@ -722,9 +748,12 @@ public class RDBDocumentStoreJDBC {
Long hasBinary = readLongOrNullFromResultSet(rs, field++);
Boolean deletedOnce = readBooleanOrNullFromResultSet(rs, field++);
long schemaVersion = tmd.hasVersion() ? readLongFromResultSet(rs, field++) : 0;
+ long sdType = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
+ long sdMaxRevTime = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
String data = rs.getString(field++);
byte[] bdata = rs.getBytes(field++);
- RDBRow row = new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, data, bdata);
+ RDBRow row = new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, sdType,
+ sdMaxRevTime, data, bdata);
rows.add(row);
}
} catch (SQLException ex) {
@@ -755,7 +784,9 @@ public class RDBDocumentStoreJDBC {
boolean useCaseStatement = lastmodcount != -1 && lastmodified >= 1;
StringBuffer sql = new StringBuffer();
String fields;
- if (tmd.hasVersion()) {
+ if (tmd.hasSplitDocs()) {
+ fields = "MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, SDTYPE, SDMAXREVTIME, ";
+ } else if (tmd.hasVersion()) {
fields = "MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, VERSION, ";
} else {
fields = "MODIFIED, MODCOUNT, CMODCOUNT, HASBINARY, DELETEDONCE, ";
@@ -795,9 +826,12 @@ public class RDBDocumentStoreJDBC {
Long hasBinary = readLongOrNullFromResultSet(rs, field++);
Boolean deletedOnce = readBooleanOrNullFromResultSet(rs, field++);
long schemaVersion = tmd.hasVersion() ? readLongFromResultSet(rs, field++) : 0;
+ long sdType = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
+ long sdMaxRevTime = tmd.hasSplitDocs() ? readLongFromResultSet(rs, field++) : 0;
String data = rs.getString(field++);
byte[] bdata = rs.getBytes(field++);
- return new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, data, bdata);
+ return new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, schemaVersion, sdType, sdMaxRevTime,
+ data, bdata);
} else {
return null;
}
@@ -873,6 +907,9 @@ public class RDBDocumentStoreJDBC {
tmp.put(NodeDocument.HAS_BINARY_FLAG, "HASBINARY");
tmp.put(NodeDocument.DELETED_ONCE, "DELETEDONCE");
tmp.put(COLLISIONSMODCOUNT, "CMODCOUNT");
+ tmp.put(NodeDocument.SD_TYPE, "SDTYPE");
+ tmp.put(NodeDocument.SD_MAX_REV_TIME_IN_SECS, "SDMAXREVTIME");
+ tmp.put(RDBDocumentStore.VERSIONPROP, "VERSION");
INDEXED_PROP_MAPPING = Collections.unmodifiableMap(tmp);
}
@@ -884,6 +921,10 @@ public class RDBDocumentStoreJDBC {
tmp.add("<=");
tmp.add("<");
tmp.add("=");
+ tmp.add("in");
+ tmp.add("is null");
+ tmp.add("is not null");
+ tmp.add("null or <");
SUPPORTED_OPS = Collections.unmodifiableSet(tmp);
}
@@ -917,7 +958,34 @@ public class RDBDocumentStoreJDBC {
String indexedProperty = cond.getPropertyName();
String column = INDEXED_PROP_MAPPING.get(indexedProperty);
if (column != null) {
- result.append(whereSep).append(column).append(" ").append(op).append(" ?");
+ String realOperand = op;
+ boolean allowNull = false;
+ if (op.startsWith("null or ")) {
+ realOperand = op.substring("null or ".length());
+ allowNull = true;
+ }
+ result.append(whereSep);
+ if (allowNull) {
+ result.append("(").append(column).append(" is null or ");
+ }
+ result.append(column).append(" ").append(realOperand);
+
+ List<? extends Object> operands = cond.getOperands();
+ if (operands.size() == 1) {
+ result.append(" ?");
+ } else if (operands.size() > 1) {
+ result.append(" (");
+ for (int i = 0; i < operands.size(); i++) {
+ result.append("?");
+ if (i < operands.size() - 1) {
+ result.append(", ");
+ }
+ }
+ result.append(") ");
+ }
+ if (allowNull) {
+ result.append(")");
+ }
whereSep = " and ";
} else {
throw new DocumentStoreException("unsupported indexed property: " + indexedProperty);
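Taken together, the rendering above emits one placeholder per operand and, for the "null or " prefix, wraps the column in a null-tolerant disjunction. As a sketch, the two conditions used by RDBVersionGCSupport below come out roughly as these WHERE fragments (the full statement also carries ID range and key pattern clauses):

SDTYPE in (?, ?, ...) and VERSION >= ?
(VERSION is null or VERSION < ?)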
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBExport.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBExport.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBExport.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBExport.java Wed Dec 6 17:54:14 2017
@@ -222,7 +222,7 @@ public class RDBExport {
try {
RDBRow row = new RDBRow(id, "1".equals(shasbinary) ? 1L : 0L, "1".equals(sdeletedonce),
smodified.length() == 0 ? 0 : Long.parseLong(smodified), Long.parseLong(smodcount),
- Long.parseLong(scmodcount), -1L, sdata, bytes);
+ Long.parseLong(scmodcount), -1L, -1L, -1L, sdata, bytes);
StringBuilder fulljson = dumpRow(ser, id, row);
if (format == Format.CSV) {
out.println(asCSV(fieldNames, fulljson));
@@ -324,7 +324,7 @@ public class RDBExport {
String data = rs.getString("DATA");
byte[] bdata = rs.getBytes("BDATA");
- RDBRow row = new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, -1L, data, bdata);
+ RDBRow row = new RDBRow(id, hasBinary, deletedOnce, modified, modcount, cmodcount, -1L, -1L, -1L, data, bdata);
StringBuilder fulljson = dumpRow(ser, id, row);
if (format == Format.CSV) {
out.println(asCSV(fieldNames, fulljson));
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBHelper.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBHelper.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBHelper.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBHelper.java Wed Dec 6 17:54:14 2017
@@ -47,7 +47,7 @@ public class RDBHelper {
for (String table : RDBDocumentStore.getTableNames()) {
System.out.println(" " + ddb.getTableCreationStatement(table, defaultOpts.getInitialSchema()));
- for (String s : ddb.getIndexCreationStatements(table)) {
+ for (String s : ddb.getIndexCreationStatements(table, defaultOpts.getInitialSchema())) {
System.out.println(" " + s);
}
for (int level = initial + 1; level <= upgradeTo; level++) {
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java Wed Dec 6 17:54:14 2017
@@ -25,9 +25,9 @@ public class RDBOptions {
private boolean dropTablesOnClose = false;
private String tablePrefix = "";
- private int initialSchema = Integer.getInteger("org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions.INITIALSCHEMA", 1);
+ private int initialSchema = Integer.getInteger("org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions.INITIALSCHEMA", 2);
private int upgradeToSchema = Integer.getInteger("org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions.UPGRADETOSCHEMA",
- 1);
+ 2);
public RDBOptions() {
}
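Both defaults are read through Integer.getInteger, so a deployment that must stay on an older layout can still pin the schema via system properties, for example (illustrative):

java -Dorg.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions.INITIALSCHEMA=1 \
-Dorg.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions.UPGRADETOSCHEMA=1 ...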
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBRow.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBRow.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBRow.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBRow.java Wed Dec 6 17:54:14 2017
@@ -34,11 +34,12 @@ public class RDBRow {
private final Boolean deletedOnce;
private final long modified, modcount, cmodcount;
private final long schemaVersion;
+ private final long sdType, sdMaxRevTime;
private final String data;
private final byte[] bdata;
public RDBRow(String id, Long hasBinaryProperties, Boolean deletedOnce, Long modified, Long modcount, Long cmodcount,
- Long schemaVersion, String data, byte[] bdata) {
+ Long schemaVersion, Long sdType, Long sdMaxRevTime, String data, byte[] bdata) {
this.id = id;
this.hasBinaryProperties = hasBinaryProperties;
this.deletedOnce = deletedOnce;
@@ -46,6 +47,8 @@ public class RDBRow {
this.modcount = modcount != null ? modcount.longValue() : LONG_UNSET;
this.cmodcount = cmodcount != null ? cmodcount.longValue() : LONG_UNSET;
this.schemaVersion = schemaVersion != null ? schemaVersion.longValue() : LONG_UNSET;
+ this.sdType = sdType != null ? sdType.longValue() : LONG_UNSET;
+ this.sdMaxRevTime = sdMaxRevTime != null ? sdMaxRevTime.longValue() : LONG_UNSET;
this.data = data;
this.bdata = bdata;
}
@@ -98,6 +101,20 @@ public class RDBRow {
return schemaVersion;
}
+ /**
+ * @return {@link #LONG_UNSET} when not set in the database
+ */
+ public long getSdType() {
+ return sdType;
+ }
+
+ /**
+ * @return {@link #LONG_UNSET} when not set in the database
+ */
+ public long getSdMaxRevTime() {
+ return sdMaxRevTime;
+ }
+
@CheckForNull
public byte[] getBdata() {
return bdata;
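A short usage sketch of the new accessors, mirroring the guard in RDBDocumentSerializer above (illustrative):

// SD values are only trustworthy for rows written at schema 2 or later
if (row.getSchemaVersion() >= 2 && row.getSdType() != RDBRow.LONG_UNSET) {
long sdType = row.getSdType();
long sdMaxRevTime = row.getSdMaxRevTime();
// map to NodeDocument.SD_TYPE / NodeDocument.SD_MAX_REV_TIME_IN_SECS
}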
Modified: jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBVersionGCSupport.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBVersionGCSupport.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBVersionGCSupport.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBVersionGCSupport.java Wed Dec 6 17:54:14 2017
@@ -16,9 +16,8 @@
*/
package org.apache.jackrabbit.oak.plugins.document.rdb;
-import static com.google.common.collect.Iterables.filter;
-
import java.io.Closeable;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -32,6 +31,7 @@ import org.apache.jackrabbit.oak.plugins
import org.apache.jackrabbit.oak.plugins.document.RevisionVector;
import org.apache.jackrabbit.oak.plugins.document.VersionGCSupport;
import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.QueryCondition;
+import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.UnsupportedIndexedPropertyException;
import org.apache.jackrabbit.oak.plugins.document.util.CloseableIterable;
import org.apache.jackrabbit.oak.plugins.document.util.Utils;
import org.apache.jackrabbit.oak.stats.Clock;
@@ -39,6 +39,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
/**
* RDB specific version of {@link VersionGCSupport} which uses an extended query
@@ -68,20 +70,53 @@ public class RDBVersionGCSupport extends
protected Iterable<NodeDocument> identifyGarbage(final Set<SplitDocType> gcTypes,
final RevisionVector sweepRevs,
final long oldestRevTimeStamp) {
- List<QueryCondition> conditions = Collections.emptyList();
- // absent support for SDTYPE as indexed property: exclude those
- // documents from the query which definitively aren't split documents
+ Iterable<NodeDocument> it1;
+ Iterable<NodeDocument> it2;
+
+ // for schema 0 or 1 rows, we'll have to constrain the path
List<String> excludeKeyPatterns = Arrays.asList("_:/%", "__:/%", "___:/%");
- Iterable<NodeDocument> it = store.queryAsIterable(Collection.NODES, null, null, excludeKeyPatterns, conditions,
- Integer.MAX_VALUE, null);
- return CloseableIterable.wrap(filter(it, new Predicate<NodeDocument>() {
+
+ try {
+ List<Integer> gcTypeCodes = Lists.newArrayList();
+ for (SplitDocType type : gcTypes) {
+ gcTypeCodes.add(type.typeCode());
+ }
+
+ List<QueryCondition> conditions1 = new ArrayList<QueryCondition>();
+ conditions1.add(new QueryCondition(NodeDocument.SD_TYPE, "in", gcTypeCodes));
+ conditions1.add(new QueryCondition(RDBDocumentStore.VERSIONPROP, ">=", 2));
+ it1 = store.queryAsIterable(Collection.NODES, null, null, Collections.emptyList(), conditions1,
+ Integer.MAX_VALUE, null);
+
+ List<QueryCondition> conditions2 = new ArrayList<QueryCondition>();
+ conditions2.add(new QueryCondition(RDBDocumentStore.VERSIONPROP, "null or <", 2));
+ it2 = store.queryAsIterable(Collection.NODES, null, null, excludeKeyPatterns, conditions2,
+ Integer.MAX_VALUE, null);
+ } catch (UnsupportedIndexedPropertyException ex) {
+ // this will happen if we query a table that doesn't have the SD*
+ // columns - create a new query without the constraint, and let the
+ // Java code filter the results
+ it1 = store.queryAsIterable(Collection.NODES, null, null, excludeKeyPatterns, Collections.emptyList(),
+ Integer.MAX_VALUE, null);
+ it2 = Collections.emptySet();
+ }
+
+ final Iterable<NodeDocument> fit1 = it1;
+ final Iterable<NodeDocument> fit2 = it2;
+
+ return CloseableIterable.wrap(Iterables.filter(Iterables.concat(fit1, fit2), new Predicate<NodeDocument>() {
@Override
public boolean apply(NodeDocument doc) {
- return gcTypes.contains(doc.getSplitDocType())
- && doc.hasAllRevisionLessThan(oldestRevTimeStamp)
+ return gcTypes.contains(doc.getSplitDocType()) && doc.hasAllRevisionLessThan(oldestRevTimeStamp)
&& !isDefaultNoBranchSplitNewerThan(doc, sweepRevs);
}
- }), (Closeable) it);
+ }), new Closeable() {
+ @Override
+ public void close() throws IOException {
+ Utils.closeIfCloseable(fit1);
+ Utils.closeIfCloseable(fit2);
+ }
+ });
}
@Override
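Sketched as SQL, the two code paths above issue roughly the following queries (column names per the INDEXED_PROP_MAPPING in RDBDocumentStoreJDBC; select list and key-pattern clause abbreviated):

select ... from NODES where SDTYPE in (?, ...) and VERSION >= ?
select ... from NODES where ID not like ... and (VERSION is null or VERSION < ?)

On tables that still lack the SD columns, the UnsupportedIndexedPropertyException path falls back to the previous behavior, a single unconstrained query whose results are filtered in Java.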
Modified: jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializerTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializerTest.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializerTest.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentSerializerTest.java Wed Dec 6 17:54:14 2017
@@ -55,7 +55,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testSimpleString() {
- RDBRow row = new RDBRow("_foo", 1L, true, 1l, 2l, 3l, 0L, "{}", null);
+ RDBRow row = new RDBRow("_foo", 1L, true, 1l, 2l, 3l, 0L, 0L, 0L, "{}", null);
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertEquals("_foo", doc.getId());
assertEquals(true, doc.hasBinary());
@@ -65,7 +65,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testNoSysprops() {
- RDBRow row = new RDBRow("_foo", null, null, 1l, 2l, 3l, 0L, "{}", null);
+ RDBRow row = new RDBRow("_foo", null, null, 1l, 2l, 3l, 0L, 0L, 0L, "{}", null);
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertEquals("_foo", doc.getId());
assertEquals(false, doc.hasBinary());
@@ -77,7 +77,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testSimpleBlob() throws UnsupportedEncodingException {
- RDBRow row = new RDBRow("_foo", 0L, false, 1l, 2l, 3l, 0L, "\"blob\"", "{}".getBytes("UTF-8"));
+ RDBRow row = new RDBRow("_foo", 0L, false, 1l, 2l, 3l, 0L, 0L, 0L, "\"blob\"", "{}".getBytes("UTF-8"));
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertEquals("_foo", doc.getId());
assertEquals(false, doc.hasBinary());
@@ -86,7 +86,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testSimpleBlob2() throws UnsupportedEncodingException {
- RDBRow row = new RDBRow("_foo", 0L, false, 1l, 2l, 3l, 0L, "\"blob\"",
+ RDBRow row = new RDBRow("_foo", 0L, false, 1l, 2l, 3l, 0L, 0L, 0L, "\"blob\"",
"{\"s\":\"string\", \"b\":true, \"i\":1}".getBytes("UTF-8"));
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertEquals("_foo", doc.getId());
@@ -100,7 +100,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testSimpleBoth() throws UnsupportedEncodingException {
try {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, "{}", "{}".getBytes("UTF-8"));
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L, "{}", "{}".getBytes("UTF-8"));
this.ser.fromRow(Collection.NODES, row);
fail("should fail");
} catch (DocumentStoreException expected) {
@@ -109,7 +109,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testBlobAndDiff() throws UnsupportedEncodingException {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L,
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L,
"\"blob\", [[\"=\", \"foo\", \"bar\"],[\"M\", \"m1\", 1],[\"M\", \"m2\", 3]]",
"{\"m1\":2, \"m2\":2}".getBytes("UTF-8"));
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
@@ -121,7 +121,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testBlobAndDiffBorked() throws UnsupportedEncodingException {
try {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, "[[\"\", \"\", \"\"]]", "{}".getBytes("UTF-8"));
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L, "[[\"\", \"\", \"\"]]", "{}".getBytes("UTF-8"));
this.ser.fromRow(Collection.NODES, row);
fail("should fail");
} catch (DocumentStoreException expected) {
@@ -130,7 +130,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testNullModified() throws UnsupportedEncodingException {
- RDBRow row = new RDBRow("_foo", 1L, true, null, 2l, 3l, 0L, "{}", null);
+ RDBRow row = new RDBRow("_foo", 1L, true, null, 2l, 3l, 0L, 0L, 0L, "{}", null);
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertNull(doc.getModified());
}
@@ -138,7 +138,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testBrokenJSONTrailingComma() throws UnsupportedEncodingException {
try {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, "{ \"x\" : 1, }", null);
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L, "{ \"x\" : 1, }", null);
this.ser.fromRow(Collection.NODES, row);
fail("should fail");
} catch (DocumentStoreException expected) {
@@ -148,7 +148,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testBrokenJSONUnquotedIdentifier() throws UnsupportedEncodingException {
try {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, "{ x : 1, }", null);
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L, "{ x : 1, }", null);
this.ser.fromRow(Collection.NODES, row);
fail("should fail");
} catch (DocumentStoreException expected) {
@@ -157,7 +157,7 @@ public class RDBDocumentSerializerTest {
@Test
public void testSimpleStringNonAscii() {
- RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, "{\"x\":\"\u20ac\uD834\uDD1E\"}", null);
+ RDBRow row = new RDBRow("_foo", 1L, false, 1l, 2l, 3l, 0L, 0L, 0L, "{\"x\":\"\u20ac\uD834\uDD1E\"}", null);
NodeDocument doc = this.ser.fromRow(Collection.NODES, row);
assertEquals("_foo", doc.getId());
assertEquals("\u20ac\uD834\uDD1E", doc.get("x"));
Modified: jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreSchemaUpgradeTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreSchemaUpgradeTest.java?rev=1817311&r1=1817310&r2=1817311&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreSchemaUpgradeTest.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStoreSchemaUpgradeTest.java Wed Dec 6 17:54:14 2017
@@ -18,10 +18,14 @@ package org.apache.jackrabbit.oak.plugin
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
import javax.sql.DataSource;
@@ -29,6 +33,9 @@ import org.apache.jackrabbit.oak.commons
import org.apache.jackrabbit.oak.plugins.document.Collection;
import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
import org.apache.jackrabbit.oak.plugins.document.DocumentStoreFixture;
+import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
+import org.apache.jackrabbit.oak.plugins.document.NodeDocument.SplitDocType;
+import org.apache.jackrabbit.oak.plugins.document.RevisionVector;
import org.apache.jackrabbit.oak.plugins.document.UpdateOp;
import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.RDBTableMetaData;
import org.apache.jackrabbit.oak.plugins.document.util.Utils;
@@ -128,6 +135,59 @@ public class RDBDocumentStoreSchemaUpgra
}
@Test
+ public void init0then2() {
+ RDBOptions op = new RDBOptions().tablePrefix("T0T2").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
+ RDBDocumentStore rdb0 = null;
+ RDBDocumentStore rdb1 = null;
+ try {
+ rdb0 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
+ RDBTableMetaData meta0 = rdb0.getTable(Collection.NODES);
+ assertFalse(meta0.hasVersion());
+ rdb1 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), new RDBOptions().tablePrefix("T0T2").initialSchema(0).upgradeToSchema(2));
+ RDBTableMetaData meta1 = rdb1.getTable(Collection.NODES);
+ assertTrue(meta1.hasVersion());
+ UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
+ testInsert.set(NodeDocument.SD_TYPE, 123L);
+ assertTrue(rdb1.create(Collection.NODES, Collections.singletonList(testInsert)));
+ // check that old instance can read a new entry
+ NodeDocument check = rdb0.find(Collection.NODES, Utils.getIdFromPath("/foo"));
+ assertNotNull(check);
+ assertEquals(123L, check.get(NodeDocument.SD_TYPE));
+ } finally {
+ if (rdb1 != null) {
+ rdb1.dispose();
+ }
+ if (rdb0 != null) {
+ rdb0.dispose();
+ }
+ }
+ }
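The same seeding pattern for split-document metadata recurs in the tests that follow; condensed into a sketch (assuming store is any open RDBDocumentStore handle, and reusing the imports already in this test class):

    // create a document carrying split-document metadata, as the tests do
    UpdateOp op = new UpdateOp(Utils.getIdFromPath("/foo"), true);
    op.set(NodeDocument.SD_TYPE, SplitDocType.DEFAULT_LEAF.typeCode());
    op.set(NodeDocument.SD_MAX_REV_TIME_IN_SECS, 1L);
    assertTrue(store.create(Collection.NODES, Collections.singletonList(op)));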
+
+ @Test
+ public void init12() {
+ LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
+ .contains("to DB level 2").create();
+ logCustomizer.starting();
+
+ RDBOptions op = new RDBOptions().tablePrefix("T12").initialSchema(1).upgradeToSchema(2).dropTablesOnClose(true);
+ RDBDocumentStore rdb = null;
+ try {
+ rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
+ RDBTableMetaData meta = rdb.getTable(Collection.NODES);
+ assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
+ assertTrue(meta.hasSplitDocs());
+ int statementsPerTable = 4;
+ assertEquals("unexpected # of log entries: " +
logCustomizer.getLogs(),
+ statementsPerTable *
RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
+ } finally {
+ logCustomizer.finished();
+ if (rdb != null) {
+ rdb.dispose();
+ }
+ }
+ }
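init12 verifies the upgrade indirectly through its INFO logging: four statements per table, across every table returned by RDBDocumentStore.getTableNames() (apparently one log entry per executed upgrade statement). The LogCustomizer pattern it uses generalizes to watching any logger; a minimal sketch, with the same matched substring the test relies on:

    LogCustomizer logs = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO).contains("to DB level 2").create();
    logs.starting();
    try {
        // run the code expected to emit the upgrade statements
    } finally {
        logs.finished();
    }
    int entries = logs.getLogs().size(); // compare against the expected count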
+
+ @Test
public void init01fail() {
LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
.contains("Attempted to upgrade").create();
@@ -178,4 +238,150 @@ public class RDBDocumentStoreSchemaUpgra
}
}
}
+
+ @Test
+ public void init22() {
+ LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
+ .contains("to DB level").create();
+ logCustomizer.starting();
+
+ RDBOptions op = new RDBOptions().tablePrefix("T" +
"22").initialSchema(2).upgradeToSchema(2).dropTablesOnClose(true);
+ RDBDocumentStore rdb = null;
+ try {
+ rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
+ RDBTableMetaData meta = rdb.getTable(Collection.NODES);
+ assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
+ assertTrue(meta.hasVersion());
+ assertTrue(meta.hasSplitDocs());
+ assertEquals("unexpected # of log entries: " +
logCustomizer.getLogs(), 0, logCustomizer.getLogs().size());
+ } finally {
+ logCustomizer.finished();
+ if (rdb != null) {
+ rdb.dispose();
+ }
+ }
+ }
+
+ @Test
+ public void testVersionGCOnOldDB() {
+ RDBOptions op = new RDBOptions().tablePrefix("T11").initialSchema(1).upgradeToSchema(1).dropTablesOnClose(true);
+ RDBDocumentStore rdb = null;
+ Iterable<NodeDocument> garbage = null;
+ try {
+ rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
+ RDBTableMetaData meta = rdb.getTable(Collection.NODES);
+ assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
+ assertTrue(meta.hasVersion());
+ RDBVersionGCSupport vgc = new RDBVersionGCSupport(rdb);
+ Set<NodeDocument.SplitDocType> gctypes =
EnumSet.of(SplitDocType.DEFAULT_LEAF, SplitDocType.COMMIT_ROOT_ONLY,
+ SplitDocType.DEFAULT_NO_BRANCH);
+ garbage = vgc.identifyGarbage(gctypes, new RevisionVector(), 0L);
+ int cnt = 0;
+ for (NodeDocument g : garbage) {
+ // get rid of compiler warning about g not being used
+ if (g.getId() != null) {
+ cnt++;
+ }
+ }
+ assertEquals(0, cnt);
+ } finally {
+ if (garbage != null) {
+ Utils.closeIfCloseable(garbage);
+ }
+ if (rdb != null) {
+ rdb.dispose();
+ }
+ }
+ }
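identifyGarbage hands back an Iterable that the test is careful to pass to Utils.closeIfCloseable, which suggests it may be backed by an open database cursor. The query path, condensed into a sketch (store being any open RDBDocumentStore, imports as in this test class):

    RDBVersionGCSupport vgc = new RDBVersionGCSupport(store);
    Set<NodeDocument.SplitDocType> types = EnumSet.of(SplitDocType.DEFAULT_LEAF,
            SplitDocType.COMMIT_ROOT_ONLY, SplitDocType.DEFAULT_NO_BRANCH);
    Iterable<NodeDocument> garbage = vgc.identifyGarbage(types, new RevisionVector(), 0L);
    try {
        for (NodeDocument doc : garbage) {
            // candidate split documents arrive here
        }
    } finally {
        Utils.closeIfCloseable(garbage); // release any underlying resources
    }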
+
+ @Test
+ public void testVersionGCOnMixedModeDB() {
+ long sdmaxrev = 1L;
+ RDBDocumentStore rdb0 = null;
+ RDBDocumentStore rdb1 = null;
+ RDBDocumentStore rdb2 = null;
+ Iterable<NodeDocument> garbage = null;
+ Set<String> expected = new HashSet<String>();
+ try {
+ // create schema-0 ds and write one split document and one regular document
+ {
+ RDBOptions options = new RDBOptions().tablePrefix("TMIXED").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
+ rdb0 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
+ RDBTableMetaData meta = rdb0.getTable(Collection.NODES);
+ assertFalse(meta.hasVersion());
+ assertFalse(meta.hasSplitDocs());
+
+ ArrayList<UpdateOp> ops = new ArrayList<UpdateOp>();
+
+ UpdateOp op01 = new UpdateOp("1:p/a", true);
+ op01.set(NodeDocument.SD_TYPE, SplitDocType.DEFAULT_LEAF.typeCode());
+ op01.set(NodeDocument.SD_MAX_REV_TIME_IN_SECS, sdmaxrev);
+
+ UpdateOp op02 = new UpdateOp(Utils.getIdFromPath("/regular"),
true);
+
+ ops.add(op01);
+ ops.add(op02);
+
+ assertTrue(rdb0.create(Collection.NODES, ops));
+ expected.add(op01.getId());
+ }
+
+ // upgrade to schema 1 and write one split document
+ {
+ RDBOptions options = new RDBOptions().tablePrefix("TMIXED").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(false);
+ rdb1 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
+ RDBTableMetaData meta = rdb1.getTable(Collection.NODES);
+ assertTrue(meta.hasVersion());
+ assertFalse(meta.hasSplitDocs());
+
+ UpdateOp op1 = new UpdateOp("1:p/b", true);
+ op1.set(NodeDocument.SD_TYPE, SplitDocType.DEFAULT_LEAF.typeCode());
+ op1.set(NodeDocument.SD_MAX_REV_TIME_IN_SECS, sdmaxrev);
+
+ assertTrue(rdb1.create(Collection.NODES, Collections.singletonList(op1)));
+ expected.add(op1.getId());
+ }
+
+ // upgrade to schema 2, add another split document
+ {
+ RDBOptions options2 = new RDBOptions().tablePrefix("TMIXED").initialSchema(0).upgradeToSchema(2).dropTablesOnClose(false);
+ rdb2 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options2);
+ RDBTableMetaData meta2 = rdb2.getTable(Collection.NODES);
+ assertTrue(meta2.hasVersion());
+ assertTrue(meta2.hasSplitDocs());
+
+ UpdateOp op2 = new UpdateOp("1:p/c", true);
+ op2.set(NodeDocument.SD_TYPE, SplitDocType.COMMIT_ROOT_ONLY.typeCode());
+ op2.set(NodeDocument.SD_MAX_REV_TIME_IN_SECS, sdmaxrev);
+
+ assertTrue(rdb2.create(Collection.NODES, Collections.singletonList(op2)));
+ expected.add(op2.getId());
+ }
+
+ // GC should find all three
+ RDBVersionGCSupport vgc = new RDBVersionGCSupport(rdb2);
+ Set<NodeDocument.SplitDocType> gctypes =
EnumSet.of(SplitDocType.DEFAULT_LEAF, SplitDocType.COMMIT_ROOT_ONLY,
+ SplitDocType.DEFAULT_NO_BRANCH);
+ garbage = vgc.identifyGarbage(gctypes, new RevisionVector(), sdmaxrev * 1000 + 10000);
+ Set<String> found = new HashSet<String>();
+ for (NodeDocument g : garbage) {
+ found.add(g.getId());
+ }
+ assertEquals(expected, found);
+
+ } finally {
+ if (garbage != null) {
+ Utils.closeIfCloseable(garbage);
+ }
+ if (rdb2 != null) {
+ rdb2.dispose();
+ }
+ if (rdb1 != null) {
+ rdb1.dispose();
+ }
+ if (rdb0 != null) {
+ rdb0.dispose();
+ }
+ }
+ }
}
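A note on the cutoff in testVersionGCOnMixedModeDB: _sdMaxRevTime values are written in seconds (SD_MAX_REV_TIME_IN_SECS), while the conversion in the test implies that the third identifyGarbage argument is a millisecond timestamp (an assumption; the unit is not stated in this diff). With sdmaxrev == 1:

    // 1 s * 1000 + 10 s of margin = 11000 ms, safely past the documents'
    // _sdMaxRevTime, so all three seeded split documents qualify as garbage
    long cutoff = sdmaxrev * 1000 + 10000;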