[ https://issues.apache.org/jira/browse/PHOENIX-6968?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17770766#comment-17770766 ]
ASF GitHub Bot commented on PHOENIX-6968: ----------------------------------------- shahrs87 commented on code in PR #1691: URL: https://github.com/apache/phoenix/pull/1691#discussion_r1342003921 ########## phoenix-core/src/it/java/org/apache/phoenix/end2end/InvalidateMetadataCacheIT.java: ########## @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end; + +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.phoenix.query.BaseTest; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.ReadOnlyProps; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.util.Map; +import java.util.Properties; + +import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY; +import static org.apache.phoenix.coprocessor.MetaDataEndpointImpl.PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.fail; + +@Category({NeedsOwnMiniClusterTest.class }) +public class InvalidateMetadataCacheIT extends BaseTest { + private static final Logger LOGGER = LoggerFactory.getLogger(InvalidateMetadataCacheIT.class); + + @BeforeClass + public static synchronized void doSetup() throws Exception { + NUM_SLAVES_BASE = 2; + Map<String, String> props = Maps.newHashMapWithExpectedSize(1); + // to fail fast in case of exception. + props.put("hbase.client.retries.number", String.valueOf(0)); + props.put(REGIONSERVER_COPROCESSOR_CONF_KEY, + FailingPhoenixRegionServerEndpoint.class.getName()); + // Setting phoenix metadata cache invalidation timeout to a small number to fail fast. + props.put(PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS, String.valueOf(2000)); + setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); + } + + /** + * Add FailingPhoenixRegionServerEndpoint as regionserver co-processor. + * Make one of the regionservers sleep in invalidateServerMetadataCache method. 
This will trigger + TimeoutException in MetaDataEndpointImpl#invalidateServerMetadataCacheWithRetries method. + * Make sure that ALTER TABLE ADD COLUMN statement fails. + */ + @Test + public void testAddColumnWithTimeout() { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + String dataTableFullName = generateUniqueName(); + String ddl = getCreateTableStmt(dataTableFullName); + HRegionServer regionServerZero = utility.getMiniHBaseCluster().getRegionServer(0); + FailingPhoenixRegionServerEndpoint coprocForRS0 = + getFailingPhoenixRegionServerEndpoint(regionServerZero); + coprocForRS0.sleep(); + try(Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement().execute(ddl); + conn.createStatement().execute("ALTER TABLE " + dataTableFullName + + " ADD CF.col2 integer"); + fail("Shouldn't reach here"); + } catch (Exception e) { + LOGGER.error("Exception while adding column", e); + // This is expected + } + } + + /** + * Add FailingPhoenixRegionServerEndpoint as regionserver co-processor. + * Make one of the regionservers throw Exception in invalidateServerMetadataCache method. + * Make sure that ALTER TABLE ADD COLUMN statement fails. 
+ */ + @Test + public void testAddColumnWithOneRSFailing() { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + String dataTableFullName = generateUniqueName(); + String ddl = getCreateTableStmt(dataTableFullName); + HRegionServer regionServerZero = utility.getMiniHBaseCluster().getRegionServer(0); + FailingPhoenixRegionServerEndpoint coprocForRS0 = + getFailingPhoenixRegionServerEndpoint(regionServerZero); + coprocForRS0.throwException(); + try(Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement().execute(ddl); + conn.createStatement().execute("ALTER TABLE " + dataTableFullName + + " ADD CF.col2 integer"); + fail("Shouldn't reach here"); + } catch (Exception e) { + LOGGER.error("Exception while adding column", e); + // This is expected + } + } + + /** + * Add FailingPhoenixRegionServerEndpoint as regionserver co-processor. + * Do not throw Exception or sleep in invalidateServerMetadataCache method for any regionservers + * Make sure that ALTER TABLE ADD COLUMN statement succeeds. 
+ */ + @Test + public void testAddColumnWithBothRSPassing() { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + String dataTableFullName = generateUniqueName(); + String ddl = getCreateTableStmt(dataTableFullName); + try(Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement().execute(ddl); + conn.createStatement().execute("ALTER TABLE " + dataTableFullName + + " ADD CF.col2 integer"); + } catch (Throwable t) { + fail("Shouldn't reach here"); + } + } + + private String getCreateTableStmt(String tableName) { + return "CREATE TABLE " + tableName + + " (a_string varchar not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string)) "; + } + + private FailingPhoenixRegionServerEndpoint getFailingPhoenixRegionServerEndpoint( + HRegionServer regionServer) { + FailingPhoenixRegionServerEndpoint coproc = regionServer + .getRegionServerCoprocessorHost() + .findCoprocessor(FailingPhoenixRegionServerEndpoint.class); + return coproc; + } +} Review Comment: Trying to think how can I make it succeed on the retry. Let me try something. > Create PhoenixRegionServerEndpoint#invalidateCache method to invalidate cache. > ------------------------------------------------------------------------------ > > Key: PHOENIX-6968 > URL: https://issues.apache.org/jira/browse/PHOENIX-6968 > Project: Phoenix > Issue Type: Sub-task > Reporter: Rushabh Shah > Assignee: Rushabh Shah > Priority: Major > > Whenever we update metadata (like alter table add column, drop table), we > need to invalidate metadata cache entry (introduced by PHOENIX-6943) on all > the regionservers which has that cache entry. First step would be to issue an > invalidate command on all the regionservers irrespective of whether that > regionserver has the cache entry. We can further optimize by invalidating > only on RS that has that cache entry. > In PHOENIX-6988 we created PhoenixRegionServerEndpoint implementing > RegionServerCoprocessor. 
We can create a new method in this co-proc something > like invalidateCache(CacheEntry) to invalidate cache for a given > table/view/index. -- This message was sent by Atlassian Jira (v8.20.10#820010)