Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 1363d5fa6 -> 6c9855139


PHOENIX-4488 Cache config parameters for MetaDataEndpointImpl during initialization (James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c985513
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c985513
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c985513

Branch: refs/heads/5.x-HBase-2.0
Commit: 6c9855139a954c5d9e7db6b298d1f59458a83ef8
Parents: 1363d5f
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Sat Feb 24 00:03:43 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Sat Feb 24 00:03:43 2018 +0530

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImplTest.java   | 44 --------------------
 .../coprocessor/MetaDataEndpointImpl.java       | 26 ++++++------
 2 files changed, 13 insertions(+), 57 deletions(-)
----------------------------------------------------------------------
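
A note on the pattern: rather than calling env.getConfiguration().getBoolean(...) or
getInt(...) on every metadata RPC, the patch resolves these values once in the
coprocessor's start() method and keeps them in instance fields. Below is a minimal
sketch of that pattern, assuming a simplified standalone class rather than the real
coprocessor API; the QueryServices/QueryServicesOptions constants are the ones
referenced in the diff, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.query.QueryServicesOptions;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.PTableType;

    public class CachedConfigSketch {
        // Resolved once at startup, reused on every request.
        private boolean blockWriteRebuildIndex;
        private int maxIndexesPerTable;

        // Analogous to MetaDataEndpointImpl.start(), which runs once when the
        // coprocessor is loaded on a region.
        public void start(Configuration config) {
            this.blockWriteRebuildIndex = config.getBoolean(
                QueryServices.INDEX_FAILURE_BLOCK_WRITE,
                QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
            this.maxIndexesPerTable = config.getInt(
                QueryServices.MAX_INDEXES_PER_TABLE,
                QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
        }

        // Hot-path check reads the cached field; no Configuration lookup
        // (key parsing, default resolution) per call.
        boolean exceededIndexQuota(PTableType tableType, PTable parentTable) {
            return PTableType.INDEX == tableType
                && parentTable.getIndexes().size() >= maxIndexesPerTable;
        }
    }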


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c985513/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
deleted file mode 100644
index 2c558d8..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-import org.junit.Test;
-
-import java.util.List;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class MetaDataEndpointImplTest {
-
-    @Test
-    public void testExceededIndexQuota() throws Exception {
-        PTable parentTable = mock(PTable.class);
-        List<PTable> indexes = Lists.newArrayList(mock(PTable.class), mock(PTable.class));
-        when(parentTable.getIndexes()).thenReturn(indexes);
-        Configuration configuration = new Configuration();
-        assertFalse(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration));
-        configuration.setInt(QueryServices.MAX_INDEXES_PER_TABLE, 1);
-        assertTrue(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration));
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c985513/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 019777e..751aea0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -236,7 +236,6 @@ import org.apache.phoenix.util.UpgradeUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.Cache;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -462,6 +461,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
     }
 
     private RegionCoprocessorEnvironment env;
+    private boolean blockWriteRebuildIndex;
+    private int maxIndexesPerTable;
+    private boolean isTablesMappingEnabled;
 
     /**
      * Stores a reference to the coprocessor environment provided by the
@@ -480,6 +482,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
         } else {
             throw new CoprocessorException("Must be loaded on a table region!");
         }
+        Configuration config = env.getConfiguration();
+        this.blockWriteRebuildIndex  = config.getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
+            QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
+        this.maxIndexesPerTable = config.getInt(QueryServices.MAX_INDEXES_PER_TABLE,
+            QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
+        this.isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
+            new ReadOnlyProps(config.iterator()));
         logger.info("Starting Tracing-Metrics Systems");
         // Start the phoenix trace collection
         Tracing.addTraceMetricsSource();
@@ -562,8 +571,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
             PTable oldTable = (PTable)metaDataCache.getIfPresent(cacheKey);
             long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP-1 : oldTable.getTimeStamp();
             PTable newTable;
-            boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
-                    QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
             newTable = getTable(scanner, clientTimeStamp, tableTimeStamp, clientVersion);
             if (newTable == null) {
                 return null;
@@ -1437,7 +1444,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                             return;
                         }
                         // make sure we haven't gone over our threshold for indexes on this table.
-                        if (execeededIndexQuota(tableType, parentTable, env.getConfiguration())) {
+                        if (execeededIndexQuota(tableType, parentTable)) {
                             builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
                             builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                             done.run(builder.build());
@@ -1657,11 +1664,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
         }
     }
 
-    @VisibleForTesting
-    static boolean execeededIndexQuota(PTableType tableType, PTable parentTable, Configuration configuration) {
-        return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= configuration
-            .getInt(QueryServices.MAX_INDEXES_PER_TABLE,
-                QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
+    private boolean execeededIndexQuota(PTableType tableType, PTable parentTable) {
+        return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= maxIndexesPerTable;
     }
 
     private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
@@ -3141,8 +3145,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
          * from getting rebuilt too often.
          */
         final boolean wasLocked = (rowLock != null);
-        boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
-                QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
         if (!wasLocked) {
             rowLock = region.getRowLock(key, false);
             if (rowLock == null) {
@@ -3438,8 +3440,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
 
         GetVersionResponse.Builder builder = GetVersionResponse.newBuilder();
         Configuration config = env.getConfiguration();
-        boolean isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
-                new ReadOnlyProps(config.iterator()));
         if (isTablesMappingEnabled
                 && PhoenixDatabaseMetaData.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) {
             logger.error("Old client is not compatible when" + " system tables 
are upgraded to map to namespace");
