This is an automated email from the ASF dual-hosted git repository.
kadir pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
new 6262b5c PHOENIX-5505 Index read repair does not repair unverified
rows with higher timestamp
6262b5c is described below
commit 6262b5c7a53fdc017229d5d6395be424c1888436
Author: Kadir <[email protected]>
AuthorDate: Wed Oct 2 12:21:24 2019 -0700
PHOENIX-5505 Index read repair does not repair unverified rows with higher
timestamp
---
.../end2end/index/GlobalIndexCheckerIT.java | 121 ++++++++++++++++++++-
.../coprocessor/BaseScannerRegionObserver.java | 1 -
.../UngroupedAggregateRegionObserver.java | 4 -
.../apache/phoenix/index/GlobalIndexChecker.java | 80 ++++++++++----
.../org/apache/phoenix/query/QueryServices.java | 2 -
.../apache/phoenix/query/QueryServicesOptions.java | 1 -
.../java/org/apache/phoenix/query/BaseTest.java | 3 -
7 files changed, 175 insertions(+), 37 deletions(-)
diff --git
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexCheckerIT.java
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexCheckerIT.java
index 6a28e71..c1b839d 100644
---
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexCheckerIT.java
+++
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexCheckerIT.java
@@ -30,6 +30,8 @@ import java.util.List;
import java.util.Map;
import com.google.common.collect.Maps;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
import org.apache.phoenix.end2end.IndexToolIT;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
@@ -44,6 +46,7 @@ import com.google.common.collect.Lists;
@RunWith(Parameterized.class)
public class GlobalIndexCheckerIT extends BaseUniqueNamesOwnClusterIT {
+ private static final Log LOG =
LogFactory.getLog(GlobalIndexCheckerIT.class);
private final boolean async;
private final String tableDDLOptions;
@@ -96,7 +99,7 @@ public class GlobalIndexCheckerIT extends
BaseUniqueNamesOwnClusterIT {
@Test
public void testSkipPostIndexDeleteUpdate() throws Exception {
String dataTableName = generateUniqueName();
- populateTable(dataTableName);
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
Connection conn = DriverManager.getConnection(getUrl());
String indexName = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + indexName + " on " +
@@ -139,7 +142,7 @@ public class GlobalIndexCheckerIT extends
BaseUniqueNamesOwnClusterIT {
@Test
public void testPartialRowUpdate() throws Exception {
String dataTableName = generateUniqueName();
- populateTable(dataTableName);
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
Connection conn = DriverManager.getConnection(getUrl());
String indexName = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + indexName + " on " +
@@ -180,7 +183,7 @@ public class GlobalIndexCheckerIT extends
BaseUniqueNamesOwnClusterIT {
@Test
public void testSkipPostIndexPartialRowUpdate() throws Exception {
String dataTableName = generateUniqueName();
- populateTable(dataTableName);
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
Connection conn = DriverManager.getConnection(getUrl());
String indexName = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + indexName + " on " +
@@ -208,9 +211,119 @@ public class GlobalIndexCheckerIT extends
BaseUniqueNamesOwnClusterIT {
}
@Test
+ public void testOnePhaseOverwiteFollowingTwoPhaseWrite() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String dataTableName = generateUniqueName();
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
+ String indexTableName = generateUniqueName();
+ conn.createStatement().execute("CREATE INDEX " + indexTableName +
"1 on " +
+ dataTableName + " (val1) include (val2, val3)" + (async ?
"ASYNC" : ""));
+ conn.createStatement().execute("CREATE INDEX " + indexTableName +
"2 on " +
+ dataTableName + " (val2) include (val1, val3)" + (async ?
"ASYNC" : ""));
+ if (async) {
+ // run the index MR job.
+ IndexToolIT.runIndexTool(true, false, null, dataTableName,
indexTableName + "1");
+ IndexToolIT.runIndexTool(true, false, null, dataTableName,
indexTableName + "2");
+ }
+ // Two Phase write. This write is recoverable
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(true);
+ conn.createStatement().execute("upsert into " + dataTableName + "
values ('c', 'cd', 'cde', 'cdef')");
+ conn.commit();
+ // One Phase write. This write is not recoverable
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(true);
+ conn.createStatement().execute("upsert into " + dataTableName + "
values ('c', 'cd', 'cdee', 'cdfg')");
+ conn.commit();
+ // Let three phase writes happen as in the normal case
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(false);
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(false);
+ String selectSql = "SELECT val2, val3 from " + dataTableName + "
WHERE val1 = 'cd'";
+ // Verify that we will read from the first index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"1");
+ // Verify the first write is visible but the second one is not
+ ResultSet rs = conn.createStatement().executeQuery(selectSql);
+ assertTrue(rs.next());
+ assertEquals("cde", rs.getString(1));
+ assertEquals("cdef", rs.getString(2));
+ assertFalse(rs.next());
+ }
+ }
+
+ @Test
+ public void testOnePhaseOverwrite() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String dataTableName = generateUniqueName();
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
+ String indexTableName = generateUniqueName();
+ conn.createStatement().execute("CREATE INDEX " + indexTableName +
"1 on " +
+ dataTableName + " (val1) include (val2, val3)" + (async ?
"ASYNC" : ""));
+ conn.createStatement().execute("CREATE INDEX " + indexTableName +
"2 on " +
+ dataTableName + " (val2) include (val1, val3)" + (async ?
"ASYNC" : ""));
+ if (async) {
+ // run the index MR job.
+ IndexToolIT.runIndexTool(true, false, null, dataTableName,
indexTableName + "1");
+ IndexToolIT.runIndexTool(true, false, null, dataTableName,
indexTableName + "2");
+ }
+            // Configure IndexRegionObserver to skip the last two write phases
(i.e., the data table update and post index
+ // update phase) and check that this does not impact the
correctness (one overwrite)
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(true);
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(true);
+ conn.createStatement().execute("upsert into " + dataTableName + "
(id, val2) values ('a', 'abcc')");
+ conn.commit();
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(false);
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(false);
+ String selectSql = "SELECT val2, val3 from " + dataTableName + "
WHERE val1 = 'ab'";
+ // Verify that we will read from the first index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"1");
+ // Verify that one phase write has no effect
+ ResultSet rs = conn.createStatement().executeQuery(selectSql);
+ assertTrue(rs.next());
+ assertEquals("abc", rs.getString(1));
+ assertEquals("abcd", rs.getString(2));
+ assertFalse(rs.next());
+ selectSql = "SELECT val2, val3 from " + dataTableName + " WHERE
val2 = 'abcc'";
+ // Verify that we will read from the second index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"2");
+ rs = conn.createStatement().executeQuery(selectSql);
+ // Verify that one phase writes have no effect
+ assertFalse(rs.next());
+            // Configure IndexRegionObserver to skip the last two write phases
(i.e., the data table update and post index
+ // update phase) and check that this does not impact the
correctness (two overwrites)
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(true);
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(true);
+ conn.createStatement().execute("upsert into " + dataTableName + "
(id, val2) values ('a', 'abccc')");
+ conn.commit();
+ conn.createStatement().execute("upsert into " + dataTableName + "
(id, val2) values ('a', 'abcccc')");
+ conn.commit();
+ IndexRegionObserver.setSkipDataTableUpdatesForTesting(false);
+ IndexRegionObserver.setSkipPostIndexUpdatesForTesting(false);
+ selectSql = "SELECT val2, val3 from " + dataTableName + " WHERE
val1 = 'ab'";
+ // Verify that we will read from the first index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"1");
+ // Verify that one phase writes have no effect
+ rs = conn.createStatement().executeQuery(selectSql);
+ assertTrue(rs.next());
+ assertEquals("abc", rs.getString(1));
+ assertEquals("abcd", rs.getString(2));
+ assertFalse(rs.next());
+ selectSql = "SELECT val2, val3 from " + dataTableName + " WHERE
val2 = 'abccc'";
+ // Verify that we will read from the second index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"2");
+ rs = conn.createStatement().executeQuery(selectSql);
+ // Verify that one phase writes have no effect
+ assertFalse(rs.next());
+ selectSql = "SELECT val2, val3 from " + dataTableName + " WHERE
val2 = 'abcccc'";
+ // Verify that we will read from the second index table
+ assertExplainPlan(conn, selectSql, dataTableName, indexTableName +
"2");
+ rs = conn.createStatement().executeQuery(selectSql);
+ // Verify that one phase writes have no effect
+ assertFalse(rs.next());
+ }
+ }
+
+ @Test
public void testSkipDataTableAndPostIndexPartialRowUpdate() throws
Exception {
String dataTableName = generateUniqueName();
- populateTable(dataTableName);
+ populateTable(dataTableName); // with two rows ('a', 'ab', 'abc',
'abcd') and ('b', 'bc', 'bcd', 'bcde')
Connection conn = DriverManager.getConnection(getUrl());
String indexName = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + indexName + "1 on " +
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 24374b6..b73615f 100644
---
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -112,7 +112,6 @@ abstract public class BaseScannerRegionObserver extends
BaseRegionObserver {
public static final String PHYSICAL_DATA_TABLE_NAME =
"_PhysicalDataTableName";
public static final String EMPTY_COLUMN_FAMILY_NAME = "_EmptyCFName";
public static final String EMPTY_COLUMN_QUALIFIER_NAME = "_EmptyCQName";
- public static final String SCAN_LIMIT = "_ScanLimit";
public final static byte[] REPLAY_TABLE_AND_INDEX_WRITES =
PUnsignedTinyint.INSTANCE.toBytes(1);
public final static byte[] REPLAY_ONLY_INDEX_WRITES =
PUnsignedTinyint.INSTANCE.toBytes(2);
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index b9a0f11..3a03f94 100644
---
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1067,8 +1067,6 @@ public class UngroupedAggregateRegionObserver extends
BaseScannerRegionObserver
indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
}
byte[] clientVersionBytes =
scan.getAttribute(BaseScannerRegionObserver.CLIENT_VERSION);
- byte[] scanLimitBytes =
scan.getAttribute(BaseScannerRegionObserver.SCAN_LIMIT);
- int scanLimit = (scanLimitBytes != null) ? Bytes.toInt(scanLimitBytes)
: 0;
boolean hasMore;
int rowCount = 0;
try {
@@ -1123,8 +1121,6 @@ public class UngroupedAggregateRegionObserver extends
BaseScannerRegionObserver
mutations.clear();
}
rowCount++;
- if (rowCount == scanLimit)
- break;
}
} while (hasMore);
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
b/phoenix-core/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
index 0fc6416..ba3bd6d 100644
---
a/phoenix-core/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
+++
b/phoenix-core/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
@@ -32,6 +32,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
@@ -80,9 +81,9 @@ public class GlobalIndexChecker extends BaseRegionObserver {
private class GlobalIndexScanner implements RegionScanner {
RegionScanner scanner;
private long ageThreshold;
- private int repairCount;
private Scan scan;
private Scan indexScan;
+ private Scan singleRowIndexScan;
private Scan buildIndexScan = null;
private Table dataHTable = null;
private byte[] emptyCF;
@@ -110,9 +111,6 @@ public class GlobalIndexChecker extends BaseRegionObserver {
ageThreshold = env.getConfiguration().getLong(
QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB,
QueryServicesOptions.DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS);
- repairCount = env.getConfiguration().getInt(
- QueryServices.GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB,
- QueryServicesOptions.DEFAULT_GLOBAL_INDEX_REPAIR_COUNT);
minTimestamp = scan.getTimeRange().getMin();
maxTimestamp = scan.getTimeRange().getMax();
}
@@ -209,9 +207,13 @@ public class GlobalIndexChecker extends BaseRegionObserver
{
}
}
- private void deleteRowIfAgedEnough(byte[] indexRowKey, long ts) throws
IOException {
+ private void deleteRowIfAgedEnough(byte[] indexRowKey, List<Cell> row,
long ts) throws IOException {
if ((EnvironmentEdgeManager.currentTimeMillis() - ts) >
ageThreshold) {
Delete del = new Delete(indexRowKey, ts);
+            // We are deleting a specific version of a row so the following
loop is for that
+ for (Cell cell : row) {
+ del.addColumn(CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell), cell.getTimestamp());
+ }
Mutation[] mutations = new Mutation[]{del};
region.batchMutate(mutations, HConstants.NO_NONCE,
HConstants.NO_NONCE);
}
@@ -222,6 +224,7 @@ public class GlobalIndexChecker extends BaseRegionObserver {
if (buildIndexScan == null) {
buildIndexScan = new Scan();
indexScan = new Scan(scan);
+ singleRowIndexScan = new Scan(scan);
byte[] dataTableName =
scan.getAttribute(PHYSICAL_DATA_TABLE_NAME);
byte[] indexTableName =
region.getRegionInfo().getTable().getName();
dataHTable = hTableFactory.getTable(new
ImmutableBytesPtr(dataTableName));
@@ -243,13 +246,16 @@ public class GlobalIndexChecker extends
BaseRegionObserver {
buildIndexScan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG,
TRUE_BYTES);
buildIndexScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD,
scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD));
buildIndexScan.setAttribute(BaseScannerRegionObserver.REBUILD_INDEXES,
TRUE_BYTES);
-
buildIndexScan.setAttribute(BaseScannerRegionObserver.SCAN_LIMIT,
Bytes.toBytes(repairCount));
buildIndexScan.setAttribute(BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK,
Bytes.toBytes(true));
}
- // Rebuild the index rows from the corresponding the rows in the
the data table
+            // Rebuild the index row from the corresponding row in the
data table
+ // Get the data row key from the index row key
byte[] dataRowKey = indexMaintainer.buildDataRowKey(new
ImmutableBytesWritable(indexRowKey), viewConstants);
buildIndexScan.withStartRow(dataRowKey, true);
- buildIndexScan.setTimeRange(ts, maxTimestamp);
+ buildIndexScan.withStopRow(dataRowKey, true);
+ buildIndexScan.setTimeRange(0, maxTimestamp);
+ // If the data table row has been deleted then we want to delete
the corresponding index row too.
+ // Thus, we are using a raw scan
buildIndexScan.setRaw(true);
try (ResultScanner resultScanner =
dataHTable.getScanner(buildIndexScan)){
resultScanner.next();
@@ -261,27 +267,57 @@ public class GlobalIndexChecker extends
BaseRegionObserver {
// Open a new scanner starting from the current row
indexScan.withStartRow(indexRowKey, true);
scanner = region.getScanner(indexScan);
- // Scan the newly build index rows
+            // Scan the newly built index row
scanner.next(row);
if (row.isEmpty()) {
return;
}
- // Check if the corresponding data table row exist
- if (Bytes.compareTo(row.get(0).getRowArray(),
row.get(0).getRowOffset(), row.get(0).getRowLength(),
+ boolean indexRowExists = false;
+ // Check if the index row still exist after rebuild
+ while (Bytes.compareTo(row.get(0).getRowArray(),
row.get(0).getRowOffset(), row.get(0).getRowLength(),
indexRowKey, 0, indexRowKey.length) == 0) {
- if (!verifyRowAndRemoveEmptyColumn(row)) {
- // The corresponding row does not exist in the data table.
- // Need to delete the row from index if it is old enough
- deleteRowIfAgedEnough(indexRowKey, ts);
- row.clear();
+ indexRowExists = true;
+ if (verifyRowAndRemoveEmptyColumn(row)) {
+ // The index row status is "verified". This row is good to
return to the client. We are done here.
+ return;
}
- return;
+ // The index row is still "unverified" after rebuild. This
means either that the data table row timestamp is
+            // lower than the timestamp of the unverified index row
(ts) and the index row that is built from
+ // the data table row is masked by this unverified row, or
that the corresponding data table row does
+            // not exist
+ // First delete the unverified row from index if it is old
enough
+ deleteRowIfAgedEnough(indexRowKey, row, ts);
+            // Now we will do a single row scan to retrieve the verified
index row built from the data table row
+ // if such an index row exists. Note we cannot read all
versions in one scan as the max number of row
+ // versions for an index table can be 1. In that case, we will
get only one (i.e., the most recent
version) instead of all versions
+ singleRowIndexScan.withStartRow(indexRowKey, true);
+ singleRowIndexScan.withStopRow(indexRowKey, true);
+ singleRowIndexScan.setTimeRange(minTimestamp, ts);
+ RegionScanner singleRowScanner =
region.getScanner(singleRowIndexScan);
+ row.clear();
+ singleRowScanner.next(row);
+ singleRowScanner.close();
+ if (row.isEmpty()) {
+ // This means that the data row did not exist, so we need
to skip this unverified row (i.e., do not
+                // return it to the client). Just returning an empty row is
sufficient to do that
+ return;
+ }
+ ts = getMaxTimestamp(row);
}
- // This means the current index row is deleted by the rebuild
process and we got the next row.
- // If it is verified then we are good to go. If not, then we need
to repair the new row
- if (!verifyRowAndRemoveEmptyColumn(row)) {
- // Rewind the scanner and let the row be scanned again so that
it can be repaired
- scanner.close();
+ if (indexRowExists) {
+ // This means there does not exist a data row for the
unverified index row. Skip this row. To do that
+ // just return empty row.
+ row.clear();
+ return;
+ } else {
+ // This means the index row has been deleted. We got the next
row
+ // If the next row is "verified" (or empty) then we are good
to go.
+ if (verifyRowAndRemoveEmptyColumn(row)) {
+ return;
+ }
+ // The next row is "unverified". Rewind the scanner and let
the row be scanned again
+ // so that it can be repaired
scanner = region.getScanner(indexScan);
row.clear();
}
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 1302760..ffeec51 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -348,8 +348,6 @@ public interface QueryServices extends SQLCloseable {
public static final String TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB =
"phoenix.task.handling.initial.delay.ms";
// The minimum age of an unverified global index row to be eligible for
deletion
public static final String
GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB =
"phoenix.global.index.row.age.threshold.to.delete.ms";
- // The maximum number of global index rows to be rebuild at a time
- public static final String GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB =
"phoenix.global.index.row.repair.count.ms";
// Enable the IndexRegionObserver Coprocessor
public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB =
"phoenix.index.region.observer.enabled";
// Enable support for long view index(default is false)
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 01da266..359b06d 100644
---
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -357,7 +357,6 @@ public class QueryServicesOptions {
public static final long DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS = 10*1000;
// 10 sec
public static final long
DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS = 10*60*1000; /* 10 min */
- public static final int DEFAULT_GLOBAL_INDEX_REPAIR_COUNT = 1;
public static final boolean DEFAULT_INDEX_REGION_OBSERVER_ENABLED = true;
public static final boolean
DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 12ba8d4..ee0683b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -20,8 +20,6 @@ package org.apache.phoenix.query;
import static
org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter.NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY;
import static org.apache.phoenix.query.QueryConstants.MILLIS_IN_DAY;
import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
-import static
org.apache.phoenix.query.QueryServices.GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB;
-import static
org.apache.phoenix.query.QueryServices.INDEX_FAILURE_DISABLE_INDEX;
import static org.apache.phoenix.util.PhoenixRuntime.CURRENT_SCN_ATTRIB;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -627,7 +625,6 @@ public abstract class BaseTest {
conf.setInt(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, 10000);
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
conf.setInt(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, 1);
- conf.setInt(GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB, 5);
return conf;
}