Repository: phoenix
Updated Branches:
  refs/heads/4.11-HBase-0.98 c37f6c408 -> 12ec493b4


PHOENIX-3947 Increase scan time out for partial index rebuild and retry only 
once


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/12ec493b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/12ec493b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/12ec493b

Branch: refs/heads/4.11-HBase-0.98
Commit: 12ec493b4b4f138e29756f13a5babf3d0b3be25a
Parents: c37f6c4
Author: Samarth Jain <[email protected]>
Authored: Thu Jul 13 19:35:33 2017 -0700
Committer: Samarth Jain <[email protected]>
Committed: Thu Jul 13 19:35:33 2017 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ConnectionUtilIT.java       |   2 -
 .../phoenix/end2end/PhoenixRuntimeIT.java       |  70 ++++++++++
 .../end2end/index/MutableIndexFailureIT.java    | 102 +++++++++-----
 .../coprocessor/MetaDataEndpointImpl.java       |   4 +-
 .../coprocessor/MetaDataRegionObserver.java     | 139 ++++++++++++++-----
 .../phoenix/mapreduce/util/ConnectionUtil.java  |   4 +-
 .../org/apache/phoenix/query/QueryServices.java |   5 +
 .../phoenix/query/QueryServicesOptions.java     |   5 +
 .../org/apache/phoenix/util/PropertiesUtil.java |  28 ++--
 .../java/org/apache/phoenix/util/QueryUtil.java |  61 ++++++--
 .../apache/phoenix/util/PropertiesUtilTest.java |  19 ++-
 .../hive/util/PhoenixConnectionUtil.java        |   2 +-
 12 files changed, 342 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionUtilIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionUtilIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionUtilIT.java
index 64bb9ec..4841bcb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionUtilIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionUtilIT.java
@@ -22,7 +22,6 @@ import static 
org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.junit.Assert.assertEquals;
 
 import java.sql.Connection;
-import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -32,7 +31,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
index 91e9370..1109070 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.end2end;
 
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -29,6 +31,9 @@ import java.util.HashSet;
 import java.util.Properties;
 import java.util.Set;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -40,9 +45,13 @@ import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -51,6 +60,7 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.collect.Sets;
 
@@ -148,4 +158,64 @@ public class PhoenixRuntimeIT extends 
ParallelStatsDisabledIT {
         assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] 
{t1, t2} );
     }
     
+    @Test
+    public void testRebuildIndexConnectionProperties() throws Exception {
+        try (PhoenixConnection rebuildIndexConnection =
+                MetaDataRegionObserver.getRebuildIndexConnection(config)) {
+            try (PhoenixConnection regularConnection =
+                    
DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
+                String rebuildUrl = rebuildIndexConnection.getURL();
+                // assert that the url ends with expected string
+                assertTrue(
+                    
rebuildUrl.contains(MetaDataRegionObserver.REBUILD_INDEX_APPEND_TO_URL_STRING));
+                // assert that the url for regular connection vs the rebuild 
connection is different
+                assertFalse(rebuildUrl.equals(regularConnection.getURL()));
+                Configuration rebuildQueryServicesConfig =
+                        
rebuildIndexConnection.getQueryServices().getConfiguration();
+                // assert that the properties are part of the query services 
config
+                assertEquals(Long.toString(Long.MAX_VALUE),
+                    
rebuildQueryServicesConfig.get(PhoenixRuntime.CURRENT_SCN_ATTRIB));
+                assertEquals(
+                    
Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT),
+                    
rebuildQueryServicesConfig.get(QueryServices.THREAD_TIMEOUT_MS_ATTRIB));
+                assertEquals(
+                    Long.toString(
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT),
+                    
rebuildQueryServicesConfig.get(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
+                
assertEquals(Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT),
+                    
rebuildQueryServicesConfig.get(HConstants.HBASE_RPC_TIMEOUT_KEY));
+                assertEquals(
+                    
Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER),
+                    
rebuildQueryServicesConfig.get(HConstants.HBASE_CLIENT_RETRIES_NUMBER));
+                assertEquals(
+                    
Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBULD_RPC_RETRY_PAUSE),
+                    
rebuildQueryServicesConfig.get(HConstants.HBASE_CLIENT_PAUSE));
+                ConnectionQueryServices rebuildQueryServices = 
rebuildIndexConnection.getQueryServices();
+                HConnection rebuildIndexHConnection =
+                        (HConnection) 
Whitebox.getInternalState(rebuildQueryServices,
+                            "connection");
+                HConnection regularHConnection =
+                        (HConnection) Whitebox.getInternalState(
+                            regularConnection.getQueryServices(), 
"connection");
+                // assert that a new HConnection was spawned
+                assertFalse(
+                    
regularHConnection.toString().equals(rebuildIndexHConnection.toString()));
+                Configuration rebuildHConnectionConfig = 
rebuildIndexHConnection.getConfiguration();
+                // assert that the HConnection has the desired properties 
needed for rebuilding
+                // indices
+                assertEquals(
+                    Long.toString(
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT),
+                    
rebuildHConnectionConfig.get(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
+                
assertEquals(Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT),
+                    
rebuildHConnectionConfig.get(HConstants.HBASE_RPC_TIMEOUT_KEY));
+                assertEquals(
+                    
Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER),
+                    
rebuildHConnectionConfig.get(HConstants.HBASE_CLIENT_RETRIES_NUMBER));
+                assertEquals(
+                    
Long.toString(QueryServicesOptions.DEFAULT_INDEX_REBULD_RPC_RETRY_PAUSE),
+                    
rebuildHConnectionConfig.get(HConstants.HBASE_CLIENT_PAUSE));
+            }
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 5da9b27..d7f3a59 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -137,28 +137,37 @@ public class MutableIndexFailureIT extends BaseTest {
     @Parameters(name = 
"MutableIndexFailureIT_transactional={0},localIndex={1},isNamespaceMapped={2},disableIndexOnWriteFailure={3},rebuildIndexOnWriteFailure={4}")
 // name is used by failsafe as file name in reports
     public static List<Object[]> data() {
         return Arrays.asList(new Object[][] { 
-                { false, false, true, true, true }, 
-                { false, false, false, true, true }, 
-                { true, false, false, true, true }, 
-                { true, false, true, true, true },
-                { false, true, true, true, true }, 
-                { false, true, false, null, null }, 
-                { true, true, false, true, null }, 
-                { true, true, true, null, true },
-
-                { false, false, false, false, true }, 
-                { false, true, false, false, null }, 
-                { false, false, false, false, false }, 
+                { false, false, true, true, true},
+                { false, false, false, true, true},
+                { true, false, false, true, true},
+                { true, false, true, true, true},
+                { false, true, true, true, true},
+                { false, true, false, null, null},
+                { true, true, false, true, null},
+                { true, true, true, null, true},
+
+                { false, false, false, false, true},
+                { false, true, false, false, null},
+                { false, false, false, false, false},
+                { false, false, false, true, true},
+                { false, false, false, true, true},
+                { false, true, false, true, true},
+                { false, true, false, true, true},
         } 
         );
     }
 
     @Test
     public void testWriteFailureDisablesIndex() throws Exception {
-        helpTestWriteFailureDisablesIndex();
+        helpTestWriteFailureDisablesIndex(false);
+    }
+
+    @Test
+    public void testRebuildTaskFailureMarksIndexDisabled() throws Exception {
+        helpTestWriteFailureDisablesIndex(true);
     }
 
-    public void helpTestWriteFailureDisablesIndex() throws Exception {
+    public void helpTestWriteFailureDisablesIndex(boolean failRebuildTask) 
throws Exception {
         String secondIndexName = "B_" + FailingRegionObserver.FAIL_INDEX_NAME;
 //        String thirdIndexName = "C_" + INDEX_NAME;
 //        String thirdFullIndexName = SchemaUtil.getTableName(schema, 
thirdIndexName);
@@ -267,26 +276,55 @@ public class MutableIndexFailureIT extends BaseTest {
             // Comment back in when PHOENIX-3815 is fixed
 //            validateDataWithIndex(conn, fullTableName, thirdFullIndexName, 
false);
 
-            // re-enable index table
-            FailingRegionObserver.FAIL_WRITE = false;
-            if (rebuildIndexOnWriteFailure) {
-                // wait for index to be rebuilt automatically
-                waitForIndexToBeRebuilt(conn,indexName);
+            if (!failRebuildTask) {
+                // re-enable index table
+                FailingRegionObserver.FAIL_WRITE = false;
+                if (rebuildIndexOnWriteFailure) {
+                    // wait for index to be rebuilt automatically
+                    waitForIndexToBeRebuilt(conn,indexName);
+                } else {
+                    // simulate replaying failed mutation
+                    replayMutations();
+                }
+
+                // Verify UPSERT on data table still works after index table 
is recreated
+                PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " 
+ fullTableName + " VALUES(?,?,?)");
+                stmt.setString(1, "a3");
+                stmt.setString(2, "x4");
+                stmt.setString(3, "4");
+                stmt.execute();
+                conn.commit();
+
+                // verify index table has correct data (note that second index 
has been dropped)
+                validateDataWithIndex(conn, fullTableName, fullIndexName, 
localIndex);
             } else {
-                // simulate replaying failed mutation
-                replayMutations();
+                // the index is only disabled for non-txn tables upon index 
table write failure
+                if (rebuildIndexOnWriteFailure && !transactional && 
!leaveIndexActiveOnFailure && !localIndex) {
+                    try {
+                        // Wait for index to be rebuilt automatically. This 
should fail because
+                        // we haven't flipped the FAIL_WRITE flag to false and 
as a result this
+                        // should cause index rebuild to fail too.
+                        waitForIndexToBeRebuilt(conn, indexName);
+                        // verify that the index was marked as disabled and 
the index disable
+                        // timestamp set to 0
+                        String q =
+                                "SELECT INDEX_STATE, INDEX_DISABLE_TIMESTAMP 
FROM SYSTEM.CATALOG WHERE TABLE_SCHEM = '"
+                                        + schema + "' AND TABLE_NAME = '" + 
indexName + "'"
+                                        + " AND COLUMN_NAME IS NULL AND 
COLUMN_FAMILY IS NULL";
+                        try (ResultSet r = 
conn.createStatement().executeQuery(q)) {
+                            assertTrue(r.next());
+                            
assertEquals(PIndexState.DISABLE.getSerializedValue(), r.getString(1));
+                            assertEquals(0, r.getLong(2));
+                            assertFalse(r.next());
+                        }
+                    } finally {
+                        // even if the above test fails, make sure we leave 
the index active
+                        // as other tests might be dependent on it
+                        FAIL_WRITE = false;
+                        waitForIndexToBeRebuilt(conn, indexName);
+                    }
+                }
             }
-
-            // Verify UPSERT on data table still works after index table is 
recreated
-            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
fullTableName + " VALUES(?,?,?)");
-            stmt.setString(1, "a3");
-            stmt.setString(2, "x4");
-            stmt.setString(3, "4");
-            stmt.execute();
-            conn.commit();
-            
-            // verify index table has correct data (note that second index has 
been dropped)
-            validateDataWithIndex(conn, fullTableName, fullIndexName, 
localIndex);
         } finally {
             FAIL_WRITE = false;
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 58178e9..68251e9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -3468,13 +3468,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
             int disableTimeStampKVIndex = -1;
             int indexStateKVIndex = 0;
             int index = 0;
-            for(Cell cell : newKVs){
+            for(Cell cell : newKVs) {
                 if(Bytes.compareTo(cell.getQualifierArray(), 
cell.getQualifierOffset(), cell.getQualifierLength(),
                       INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0){
                   newKV = cell;
                   indexStateKVIndex = index;
                 } else if (Bytes.compareTo(cell.getQualifierArray(), 
cell.getQualifierOffset(), cell.getQualifierLength(),
-                  INDEX_DISABLE_TIMESTAMP_BYTES, 0, 
INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0){
+                  INDEX_DISABLE_TIMESTAMP_BYTES, 0, 
INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) {
                   disableTimeStampKVIndex = index;
                 }
                 index++;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 7e4f1a9..9c949c5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -32,8 +32,11 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.annotation.concurrent.GuardedBy;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -80,11 +83,13 @@ import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.UpgradeUtil;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.protobuf.ServiceException;
@@ -96,11 +101,15 @@ import com.google.protobuf.ServiceException;
  */
 public class MetaDataRegionObserver extends BaseRegionObserver {
     public static final Log LOG = 
LogFactory.getLog(MetaDataRegionObserver.class);
+    public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = 
"REBUILDINDEX";
     protected ScheduledThreadPoolExecutor executor = new 
ScheduledThreadPoolExecutor(1);
     private boolean enableRebuildIndex = 
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD;
     private long rebuildIndexTimeInterval = 
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL;
     private static Map<PName, Long> batchExecutedPerTableMap = new 
HashMap<PName, Long>();
 
+    @GuardedBy("MetaDataRegionObserver.class")
+    private static Properties rebuildIndexConnectionProps;
+
     @Override
     public void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
             boolean abortRequested) {
@@ -113,7 +122,8 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
         // sleep a little bit to compensate time clock skew when 
SYSTEM.CATALOG moves
         // among region servers because we relies on server time of RS which 
is hosting
         // SYSTEM.CATALOG
-        long sleepTime = 
env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
+        Configuration config = env.getConfiguration();
+        long sleepTime = 
config.getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
             QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
         try {
             if(sleepTime > 0) {
@@ -122,11 +132,14 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
         } catch (InterruptedException ie) {
             Thread.currentThread().interrupt();
         }
-        enableRebuildIndex = 
env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
-            QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
-        rebuildIndexTimeInterval = 
env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
-            
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
-        
+        enableRebuildIndex =
+                config.getBoolean(
+                    QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
+                    
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
+        rebuildIndexTimeInterval =
+                config.getLong(
+                    
QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
+                    
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
     }
     
     @Override
@@ -179,6 +192,7 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
         }
         try {
             Class.forName(PhoenixDriver.class.getName());
+            
initRebuildIndexConnectionProps(e.getEnvironment().getConfiguration());
             // starts index rebuild schedule work
             BuildIndexScheduleTask task = new 
BuildIndexScheduleTask(e.getEnvironment());
             // run scheduled task every 10 secs
@@ -202,9 +216,10 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
 
         public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) {
             this.env = env;
-            this.rebuildIndexBatchSize = env.getConfiguration().getLong(
+            Configuration configuration = env.getConfiguration();
+            this.rebuildIndexBatchSize = configuration.getLong(
                     QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, 
HConstants.LATEST_TIMESTAMP);
-            this.configuredBatches = env.getConfiguration().getLong(
+            this.configuredBatches = configuration.getLong(
                     
QueryServices.INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE, 
configuredBatches);
         }
 
@@ -277,18 +292,7 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                     }
 
                     if (conn == null) {
-                       final Properties props = new Properties();
-                       // Set SCN so that we don't ping server and have the 
upper bound set back to
-                       // the timestamp when the failure occurred.
-                       props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(Long.MAX_VALUE));
-                       
-                       //Set timeout to max value as rebuilding may take time
-                       
props.setProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, 
Long.toString(Long.MAX_VALUE));
-                       
props.setProperty(QueryServices.HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB, 
Long.toString(Long.MAX_VALUE));
-                       props.setProperty(QueryServices.RPC_TIMEOUT_ATTRIB, 
Long.toString(Long.MAX_VALUE));
-                       // don't run a second index populations upsert select 
-                        
props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0"); 
-                        conn = QueryUtil.getConnectionOnServer(props, 
env.getConfiguration()).unwrap(PhoenixConnection.class);
+                        conn = 
getRebuildIndexConnection(env.getConfiguration());
                         dataTableToIndexesMap = Maps.newHashMap();
                     }
                     String dataTableFullName = 
SchemaUtil.getTableName(schemaName, dataTable);
@@ -309,7 +313,7 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                     // Allow index to begin incremental maintenance as index 
is back online and we
                     // cannot transition directly from DISABLED -> ACTIVE
                     if 
(Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexState) == 0) {
-                        updateIndexState(conn, indexTableFullName, env, 
PIndexState.DISABLE, PIndexState.INACTIVE);
+                        updateIndexState(conn, indexTableFullName, env, 
PIndexState.DISABLE, PIndexState.INACTIVE, null);
                     }
                     List<PTable> indexesToPartiallyRebuild = 
dataTableToIndexesMap.get(dataPTable);
                     if (indexesToPartiallyRebuild == null) {
@@ -404,7 +408,7 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                                                                                
indexPTable.getTableName().getString());
                                                                if (scanEndTime 
== HConstants.LATEST_TIMESTAMP) {
                                                                        
updateIndexState(conn, indexTableFullName, env, PIndexState.INACTIVE,
-                                                                               
        PIndexState.ACTIVE);
+                                                                               
        PIndexState.ACTIVE, 0l);
                                                                        
batchExecutedPerTableMap.remove(dataPTable.getName());
                                     LOG.info("Making Index:" + 
indexPTable.getTableName() + " active after rebuilding");
                                                                } else {
@@ -425,12 +429,26 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                                                                                
        "During Round-robin build: Successfully updated index disabled 
timestamp  for "
                                                                                
                        + indexTableFullName + " to " + scanEndTime);
                                                                }
-
                                                        }
-                                               } catch (Exception e) { // Log, 
but try next table's
-                                                                               
                // indexes
-                                                       LOG.warn("Unable to 
rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild
-                                                                       + ". 
Will try again next on next scheduled invocation.", e);
+                                               } catch (Exception e) {
+                                                       for (PTable index : 
indexesToPartiallyRebuild) {
+                                                       String 
indexTableFullName = SchemaUtil.getTableName(
+                                    index.getSchemaName().getString(),
+                                    index.getTableName().getString());
+                                try {
+                                    /*
+                                     * We are going to mark the index as 
disabled and set the index
+                                     * disable timestamp to 0 so that the 
rebuild task won't pick up
+                                     * this index again for rebuild.
+                                     */
+                                    updateIndexState(conn, indexTableFullName, 
env,
+                                        PIndexState.INACTIVE, 
PIndexState.DISABLE, 0l);
+                                } catch (Throwable ex) {
+                                                           LOG.error("Unable 
to mark index " + indexTableFullName + " as disabled after rebuilding it 
failed", ex);
+                                                       }
+                                                   }
+                                                       LOG.error("Unable to 
rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild
+                                                                       + ". 
Won't attempt again. Manual intervention needed to re-build the index", e);
                                                }
                                        }
                                }
@@ -470,8 +488,12 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
     }
     
        private static void updateIndexState(PhoenixConnection conn, String 
indexTableName,
-                       RegionCoprocessorEnvironment env, PIndexState oldState, 
PIndexState newState)
+                       RegionCoprocessorEnvironment env, PIndexState oldState, 
PIndexState newState, Long indexDisableTimestamp)
                                        throws ServiceException, Throwable {
+        if (newState == PIndexState.ACTIVE) {
+            Preconditions.checkArgument(indexDisableTimestamp == 0,
+                "Index disable timestamp has to be 0 when marking an index as 
active");
+        }
                byte[] indexTableKey = 
SchemaUtil.getTableKeyFromFullName(indexTableName);
                String schemaName = 
SchemaUtil.getSchemaNameFromFullName(indexTableName);
                String indexName = 
SchemaUtil.getTableNameFromFullName(indexTableName);
@@ -480,12 +502,15 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                Put put = new Put(indexTableKey);
                put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, 
PhoenixDatabaseMetaData.INDEX_STATE_BYTES,
                                newState.getSerializedBytes());
-               if (newState == PIndexState.ACTIVE) {
-                       put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
-                                       
PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, 
PLong.INSTANCE.toBytes(0));
-                       put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
-                                       
PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, 
PLong.INSTANCE.toBytes(0));
-               }
+               if (indexDisableTimestamp != null) {
+            put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
+                PLong.INSTANCE.toBytes(indexDisableTimestamp));
+        }
+        if (newState == PIndexState.ACTIVE) {
+            put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, 
PLong.INSTANCE.toBytes(0));
+        }
                final List<Mutation> tableMetadata = Collections.<Mutation> 
singletonList(put);
                MetaDataMutationResult result = 
conn.getQueryServices().updateIndexState(tableMetadata, null);
                MutationCode code = result.getMutationCode();
@@ -511,4 +536,50 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
                                
PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, CompareOp.NOT_EQUAL, 
PLong.INSTANCE.toBytes(0),
                                rowMutations);
        }
+
+    private static synchronized void 
initRebuildIndexConnectionProps(Configuration config) {
+        if (rebuildIndexConnectionProps == null) {
+            Properties props = new Properties();
+            long indexRebuildQueryTimeoutMs =
+                    
config.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB,
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT);
+            long indexRebuildRPCTimeoutMs =
+                    
config.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB,
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT);
+            long indexRebuildClientScannerTimeOutMs =
+                    
config.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT);
+            int indexRebuildRpcRetriesCounter =
+                    
config.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER,
+                        
QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER);
+            long indexRebuildRpcRetryPauseTimeMs =
+                    
config.getLong(QueryServices.INDEX_REBUILD_RPC_RETRY_PAUSE_TIME,
+                        
QueryServicesOptions.DEFAULT_INDEX_REBULD_RPC_RETRY_PAUSE);
+            // Set SCN so that we don't ping server and have the upper bound 
set back to
+            // the timestamp when the failure occurred.
+            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(Long.MAX_VALUE));
+            // Set various phoenix and hbase level timeouts and rpc retries
+            props.setProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
+                Long.toString(indexRebuildQueryTimeoutMs));
+            props.setProperty(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
+                Long.toString(indexRebuildClientScannerTimeOutMs));
+            props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY,
+                Long.toString(indexRebuildRPCTimeoutMs));
+            props.setProperty(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+                Long.toString(indexRebuildRpcRetriesCounter));
+            props.setProperty(HConstants.HBASE_CLIENT_PAUSE,
+                Long.toString(indexRebuildRpcRetryPauseTimeMs));
+            // don't run a second index population upsert select
+            props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0");
+            rebuildIndexConnectionProps = 
PropertiesUtil.combineProperties(props, config);
+        }
+    }
+
+    public static PhoenixConnection getRebuildIndexConnection(Configuration 
config)
+            throws SQLException, ClassNotFoundException {
+        initRebuildIndexConnectionProps(config);
+        //return QueryUtil.getConnectionOnServer(rebuildIndexConnectionProps, 
config).unwrap(PhoenixConnection.class);
+        return 
QueryUtil.getConnectionOnServerWithCustomUrl(rebuildIndexConnectionProps,
+            
REBUILD_INDEX_APPEND_TO_URL_STRING).unwrap(PhoenixConnection.class);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
index 4ba33e8..ada3816 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
@@ -57,7 +57,7 @@ public class ConnectionUtil {
                return 
getConnection(PhoenixConfigurationUtil.getInputCluster(conf),
                                PhoenixConfigurationUtil.getClientPort(conf),
                                PhoenixConfigurationUtil.getZNodeParent(conf),
-                               PropertiesUtil.extractProperties(props, conf));
+                               PropertiesUtil.combineProperties(props, conf));
     }
 
     /**
@@ -82,7 +82,7 @@ public class ConnectionUtil {
                return 
getConnection(PhoenixConfigurationUtil.getOutputCluster(conf),
                                PhoenixConfigurationUtil.getClientPort(conf),
                                PhoenixConfigurationUtil.getZNodeParent(conf),
-                               PropertiesUtil.extractProperties(props, conf));
+                               PropertiesUtil.combineProperties(props, conf));
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 7c37930..2f23282 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -130,6 +130,11 @@ public interface QueryServices extends SQLCloseable {
     // A master switch if to enable auto rebuild an index which failed to be 
updated previously
     public static final String INDEX_FAILURE_HANDLING_REBUILD_ATTRIB = 
"phoenix.index.failure.handling.rebuild";
     public static final String INDEX_FAILURE_HANDLING_REBUILD_PERIOD = 
"phoenix.index.failure.handling.rebuild.period";
+    public static final String INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB = 
"phoenix.index.rebuild.query.timeout";
+    public static final String INDEX_REBUILD_RPC_TIMEOUT_ATTRIB = 
"phoenix.index.rebuild.rpc.timeout";
+    public static final String INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB = 
"phoenix.index.rebuild.client.scanner.timeout";
+    public static final String INDEX_REBUILD_RPC_RETRIES_COUNTER = 
"phoenix.index.rebuild.rpc.retries.counter";
+    public static final String INDEX_REBUILD_RPC_RETRY_PAUSE_TIME = 
"phoenix.index.rebuild.rpc.retry.pause";
 
     // Time interval to check if there is an index needs to be rebuild
     public static final String INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index b8e92a7..e26ddf0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -172,6 +172,11 @@ public class QueryServicesOptions {
     public static final boolean DEFAULT_INDEX_FAILURE_DISABLE_INDEX = true; 
     public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 
60000; // 60 secs
     public static final long 
DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 1; // 1 ms
+    public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = 30000 * 60; 
// 30 mins
+    public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 30000 * 60; 
// 30 mins
+    public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 
30000 * 60; // 30 mins
+    public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5;
+    public static final long DEFAULT_INDEX_REBULD_RPC_RETRY_PAUSE = 3000; // 3 
seconds
 
     /**
      * HConstants#HIGH_QOS is the max we will see to a standard table. We go 
higher to differentiate

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index f59c01b..f6eb5c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -41,26 +41,30 @@ public class PropertiesUtil {
         return newProperties;
     }
     
-     /**
-     * Add properties from the given Configuration to the provided Properties.
-     *
-     * @param props properties to which connection information from the 
Configuration will be added
-     * @param conf configuration containing connection information
-     * @return the input Properties value, with additional connection 
information from the
-     * given Configuration
+    /**
+     * Add properties from the given Configuration to the provided Properties. 
Note that only those
+     * configuration properties whose values are not already set will be added 
to the provided
+     * properties. The method doesn't modify the passed-in properties; instead 
it makes a clone of
+     * them before combining.
+     * @return properties object that is a combination of properties contained 
in props and
+     *         properties contained in conf
      */
-    public static Properties extractProperties(Properties props, final 
Configuration conf) {
+    public static Properties combineProperties(Properties props, final 
Configuration conf) {
         Iterator<Map.Entry<String, String>> iterator = conf.iterator();
-        if(iterator != null) {
+        Properties copy = deepCopy(props);
+        if (iterator != null) {
             while (iterator.hasNext()) {
                 Map.Entry<String, String> entry = iterator.next();
-                props.setProperty(entry.getKey(), entry.getValue());
+                // set the property from config only if props doesn't have it 
already
+                if (copy.getProperty(entry.getKey()) == null) {
+                    copy.setProperty(entry.getKey(), entry.getValue());
+                }
             }
         }
-        return props;
+        return copy;
     }
 
-    /**
+   /**
      * Utility to work around the limitation of the copy constructor
      * {@link Configuration#Configuration(Configuration)} provided by the 
{@link Configuration}
      * class. See https://issues.apache.org/jira/browse/HBASE-18378.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index 0b82857..b8406b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -325,16 +325,24 @@ public final class QueryUtil {
         return getConnection(props, conf);
     }
 
+    public static Connection getConnectionOnServerWithCustomUrl(Properties 
props, String principal)
+            throws SQLException, ClassNotFoundException {
+        UpgradeUtil.doNotUpgradeOnFirstConnection(props);
+        String url = getConnectionUrl(props, null, principal);
+        LOG.info("Creating connection with the jdbc url: " + url);
+        return DriverManager.getConnection(url, props);
+    }
+
     public static Connection getConnection(Configuration conf) throws 
ClassNotFoundException,
             SQLException {
         return getConnection(new Properties(), conf);
     }
-    
+
     private static Connection getConnection(Properties props, Configuration 
conf)
             throws ClassNotFoundException, SQLException {
         String url = getConnectionUrl(props, conf);
         LOG.info("Creating connection with the jdbc url: " + url);
-        PropertiesUtil.extractProperties(props, conf);
+        props = PropertiesUtil.combineProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }
 
@@ -342,24 +350,57 @@ public final class QueryUtil {
             throws ClassNotFoundException, SQLException {
         return getConnectionUrl(props, conf, null);
     }
+    /**
+     * @return connection url using the various properties set in props and 
conf. This method is an
+     *         alternative to {@link #getConnectionUrlUsingProps(Properties, 
String)} when all the
+     *         relevant connection properties are passed in both {@link 
Properties} and {@link Configuration}
+     */
     public static String getConnectionUrl(Properties props, Configuration 
conf, String principal)
             throws ClassNotFoundException, SQLException {
         // read the hbase properties from the configuration
-        int port = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 
HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
+        int port = getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 
HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT, props, conf);
         // Build the ZK quorum server string with "server:clientport" list, 
separated by ','
-        final String server =
-                conf.get(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
-        String znodeParent = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
-                HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
+        final String server = getString(HConstants.ZOOKEEPER_QUORUM, 
HConstants.LOCALHOST, props, conf);
+        String znodeParent = getString(HConstants.ZOOKEEPER_ZNODE_PARENT, 
HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT, props, conf);
         String url = getUrl(server, port, znodeParent, principal);
+        if (url.endsWith(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + "")) {
+            url = url.substring(0, url.length() - 1);
+        }
         // Mainly for testing to tack on the test=true part to ensure driver 
is found on server
-        String extraArgs = 
props.getProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, 
conf.get(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, 
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS));
+        String defaultExtraArgs =
+                conf != null
+                        ? conf.get(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
+                            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS)
+                        : QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS;
+        // If props doesn't have a default for extra args then use the extra 
args in conf as default
+        String extraArgs =
+                props.getProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, 
defaultExtraArgs);
         if (extraArgs.length() > 0) {
-            url += extraArgs + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+            url +=
+                    PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + extraArgs
+                            + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+        } else {
+            url += PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
         }
         return url;
     }
-    
+
+    private static int getInt(String key, int defaultValue, Properties props, 
Configuration conf) {
+        if (conf == null) {
+            Preconditions.checkNotNull(props);
+            return Integer.parseInt(props.getProperty(key, 
String.valueOf(defaultValue)));
+        }
+        return conf.getInt(key, defaultValue);
+    }
+
+    private static String getString(String key, String defaultValue, 
Properties props, Configuration conf) {
+        if (conf == null) {
+            Preconditions.checkNotNull(props);
+            return props.getProperty(key, defaultValue);
+        }
+        return conf.get(key, defaultValue);
+    }
+
     public static String getViewStatement(String schemaName, String tableName, 
String where) {
         // Only form we currently support for VIEWs: SELECT * FROM t WHERE ...
         return SELECT + " " + WildcardParseNode.NAME + " " + FROM + " " +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
index 17adfcb..1dc67da 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
@@ -59,14 +59,25 @@ public class PropertiesUtilTest {
         conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
         conf.set(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY, 
                 PropertiesUtilTest.SOME_OTHER_PROPERTY_VALUE);
-        PropertiesUtil.extractProperties(props, conf);
-        assertEquals(props.getProperty(HConstants.ZOOKEEPER_QUORUM),
+        Properties combinedProps = PropertiesUtil.combineProperties(props, 
conf);
+        assertEquals(combinedProps.getProperty(HConstants.ZOOKEEPER_QUORUM),
                 conf.get(HConstants.ZOOKEEPER_QUORUM));
-        
assertEquals(props.getProperty(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY),
+        
assertEquals(combinedProps.getProperty(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY),
                 conf.get(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY));
     }
-    private void verifyValidCopy(Properties props) throws SQLException {
 
+    @Test
+    public void testPropertyOverrideRespected() throws Exception {
+        final Configuration conf = HBaseConfiguration.create();
+        final Properties props = new Properties();
+        props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY,
+            Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10));
+        Properties combinedProps = PropertiesUtil.combineProperties(props, 
conf);
+        
assertEquals(combinedProps.getProperty(HConstants.HBASE_RPC_TIMEOUT_KEY),
+            Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10));
+    }
+
+    private void verifyValidCopy(Properties props) throws SQLException {
         Properties copy = PropertiesUtil.deepCopy(props);
         copy.containsKey(PhoenixRuntime.TENANT_ID_ATTRIB); //This checks the 
map and NOT the defaults in java.util.Properties
         assertEquals(SOME_TENANT_ID, 
copy.getProperty(PhoenixRuntime.TENANT_ID_ATTRIB));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12ec493b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
----------------------------------------------------------------------
diff --git 
a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
 
b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
index b32419a..8d76ac0 100644
--- 
a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
+++ 
b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
@@ -61,7 +61,7 @@ public class PhoenixConnectionUtil {
                 zNodeParent;
 
         return getConnection(quorum, zooKeeperClientPort, zNodeParent, 
PropertiesUtil
-                .extractProperties(props, conf));
+                .combineProperties(props, conf));
     }
 
     public static Connection getConnection(final Table table) throws 
SQLException {

Reply via email to