[2/2] phoenix git commit: PHOENIX-3280 Automatic attempt to rebuild all disabled index

2016-09-15 Thread jamestaylor
PHOENIX-3280 Automatic attempt to rebuild all disabled index


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/056333da
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/056333da
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/056333da

Branch: refs/heads/4.x-HBase-0.98
Commit: 056333da2ba2d2f2f19c0d65b253c26dbeac4dc4
Parents: 5e63bd2
Author: James Taylor 
Authored: Thu Sep 15 00:48:24 2016 -0700
Committer: James Taylor 
Committed: Thu Sep 15 01:28:28 2016 -0700

--
 .../coprocessor/MetaDataRegionObserver.java | 179 +++
 1 file changed, 104 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/056333da/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 66a5dcb..aef5c69 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.coprocessor;
 import java.io.IOException;
 import java.sql.SQLException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.TimerTask;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -79,6 +79,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.UpgradeUtil;
 
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 
 /**
@@ -223,13 +224,11 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);
 
-PTable dataPTable = null;
+Map<PTable, List<PTable>> dataTableToIndexesMap = null;
 MetaDataClient client = null;
 boolean hasMore = false;
 List<Cell> results = new ArrayList<Cell>();
-List<PTable> indexesToPartiallyRebuild = Collections.emptyList();
 scanner = this.env.getRegion().getScanner(scan);
-long earliestDisableTimestamp = Long.MAX_VALUE;
 
 do {
 results.clear();
@@ -249,19 +248,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 if (disabledTimeStampVal <= 0) {
 continue;
 }
-if (disabledTimeStampVal < earliestDisableTimestamp) {
-earliestDisableTimestamp = disabledTimeStampVal;
-}
-
 byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
-byte[] indexStat = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+byte[] indexState = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
-if ((dataTable == null || dataTable.length == 0) || (indexStat == null || indexStat.length == 0)
-|| (dataPTable != null
-&& Bytes.compareTo(dataPTable.getName().getBytes(), dataTable) != 0)) {
+if ((dataTable == null || dataTable.length == 0) || (indexState == null || indexState.length == 0)) {
 // data table name can't be empty
-// we need to build indexes of same data table. so skip other indexes for this task.
 continue;
 }
 
@@ -284,14 +276,19 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 // don't run a second index populations upsert select
 props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0");
 conn = QueryUtil.getConnectionOnServer(props, env.getConfiguration()).unwrap(PhoenixConnection.class);
-String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
-dataPTable = PhoenixRuntime.getTable(conn, dataTableFullName);
-indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
 client = new MetaDataClient(conn);
+dataTableToIndexesMap = Maps.n
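
Note: the patch above replaces the single dataPTable / indexesToPartiallyRebuild pair with a map keyed by data table, so one run of the rebuild task can cover disabled indexes that belong to several different data tables instead of skipping all but the first. Below is a minimal standalone sketch of that grouping step; the class and method names (RebuildGroupingSketch, DisabledIndexRow, groupByDataTable) and the sample table names are hypothetical, and plain JDK collections stand in for the Guava Maps/Lists helpers the patch imports.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RebuildGroupingSketch {

    // Hypothetical stand-in for one disabled-index row read from SYSTEM.CATALOG:
    // the data table it belongs to plus the index name. Not a Phoenix class.
    static final class DisabledIndexRow {
        final String dataTableFullName;
        final String indexName;
        DisabledIndexRow(String dataTableFullName, String indexName) {
            this.dataTableFullName = dataTableFullName;
            this.indexName = indexName;
        }
    }

    // Group every disabled index under its data table so a single pass can
    // issue one partial rebuild per data table, rather than handling only the
    // first data table it happens to encounter.
    static Map<String, List<String>> groupByDataTable(List<DisabledIndexRow> rows) {
        Map<String, List<String>> dataTableToIndexes = new LinkedHashMap<>();
        for (DisabledIndexRow row : rows) {
            dataTableToIndexes
                    .computeIfAbsent(row.dataTableFullName, k -> new ArrayList<>())
                    .add(row.indexName);
        }
        return dataTableToIndexes;
    }

    public static void main(String[] args) {
        List<DisabledIndexRow> rows = new ArrayList<>();
        rows.add(new DisabledIndexRow("S.T1", "T1_IDX_A"));
        rows.add(new DisabledIndexRow("S.T1", "T1_IDX_B"));
        rows.add(new DisabledIndexRow("S.T2", "T2_IDX_A"));
        // Prints: {S.T1=[T1_IDX_A, T1_IDX_B], S.T2=[T2_IDX_A]}
        System.out.println(groupByDataTable(rows));
    }
}

In the coprocessor itself the map is declared as Map<PTable, List<PTable>> and the values are resolved through PhoenixRuntime.getTable; strings are used here only to keep the sketch self-contained.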

[2/2] phoenix git commit: PHOENIX-3280 Automatic attempt to rebuild all disabled index

2016-09-15 Thread jamestaylor
PHOENIX-3280 Automatic attempt to rebuild all disabled index


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e2fc00db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e2fc00db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e2fc00db

Branch: refs/heads/4.x-HBase-1.1
Commit: e2fc00dbcabf84c6a8b6ed36e58c32b73d2af23d
Parents: a2ef919
Author: James Taylor 
Authored: Thu Sep 15 00:48:24 2016 -0700
Committer: James Taylor 
Committed: Thu Sep 15 01:18:30 2016 -0700

--
 .../coprocessor/MetaDataRegionObserver.java | 179 +++
 1 file changed, 104 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2fc00db/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index ff100a0..00981f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.coprocessor;
 import java.io.IOException;
 import java.sql.SQLException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.TimerTask;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -79,6 +79,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.UpgradeUtil;
 
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 
 /**
@@ -223,13 +224,11 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);
 
-PTable dataPTable = null;
+Map<PTable, List<PTable>> dataTableToIndexesMap = null;
 MetaDataClient client = null;
 boolean hasMore = false;
 List<Cell> results = new ArrayList<Cell>();
-List<PTable> indexesToPartiallyRebuild = Collections.emptyList();
 scanner = this.env.getRegion().getScanner(scan);
-long earliestDisableTimestamp = Long.MAX_VALUE;
 
 do {
 results.clear();
@@ -249,19 +248,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 if (disabledTimeStampVal <= 0) {
 continue;
 }
-if (disabledTimeStampVal < earliestDisableTimestamp) {
-earliestDisableTimestamp = disabledTimeStampVal;
-}
-
 byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
-byte[] indexStat = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+byte[] indexState = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
 PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
-if ((dataTable == null || dataTable.length == 0) || (indexStat == null || indexStat.length == 0)
-|| (dataPTable != null
-&& !dataPTable.getName().getString().equals(Bytes.toString(dataTable)))) {
+if ((dataTable == null || dataTable.length == 0) || (indexState == null || indexState.length == 0)) {
 // data table name can't be empty
-// we need to build indexes of same data table. so skip other indexes for this task.
 continue;
 }
 
@@ -284,14 +276,19 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 // don't run a second index populations upsert select
 props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0");
 conn = QueryUtil.getConnectionOnServer(props, env.getConfiguration()).unwrap(PhoenixConnection.class);
-String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
-dataPTable = PhoenixRuntime.getTable(conn, dataTableFullName);
-indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
 client = new MetaDataClient(conn);
+dataTableToIndexesMap = Maps
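
Note: the two commits carry the same patch to the two release branches; the only textual difference is in the check they delete, which compared data-table names as raw bytes on 4.x-HBase-0.98 and as decoded strings on 4.x-HBase-1.1. A small sketch of those two equivalent comparison styles using HBase's Bytes utility follows (requires hbase-common on the classpath; the table names are invented for illustration).

import org.apache.hadoop.hbase.util.Bytes;

public class NameCompareSketch {
    public static void main(String[] args) {
        byte[] current = Bytes.toBytes("S.DATA_TABLE");
        byte[] fromRow = Bytes.toBytes("S.OTHER_TABLE");

        // Style removed on 4.x-HBase-0.98: lexicographic comparison of the raw bytes.
        boolean differsByBytes = Bytes.compareTo(current, fromRow) != 0;

        // Style removed on 4.x-HBase-1.1: decode both sides to String and use equals().
        boolean differsByString = !Bytes.toString(current).equals(Bytes.toString(fromRow));

        // For UTF-8 table names the two styles agree; the byte comparison simply
        // avoids the intermediate String allocations.
        System.out.println(differsByBytes + " " + differsByString); // true true
    }
}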