[
https://issues.apache.org/jira/browse/PHOENIX-7025?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17774713#comment-17774713
]
ASF GitHub Bot commented on PHOENIX-7025:
-----------------------------------------
palashc commented on code in PR #1666:
URL: https://github.com/apache/phoenix/pull/1666#discussion_r1357549284
##########
phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java:
##########
@@ -536,7 +525,391 @@ public void testUpdateLastDDLTimestampViewAfterIndexCreation() throws Exception
}
}
- public long getLastDDLTimestamp(String tableName) throws SQLException {
+ /**
+ * Client-1 creates a table, upserts data and alters the table.
+ * Client-2 queries the table before and after the alter.
+     * Check that queries work successfully in both cases and verify the number of addTable invocations.
+ */
+ @Test
+ public void testSelectQueryWithOldDDLTimestamp() throws SQLException {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+ int expectedNumCacheUpdates;
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ // create table with UCF=never and upsert data using client-1
+ createTable(conn1, tableName, NEVER);
+ upsert(conn1, tableName);
+
+            // select query from client-2 works to populate client-side metadata cache
+ // there should be 1 update to the client cache
+ query(conn2, tableName);
+ expectedNumCacheUpdates = 1;
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+
+ // add column using client-1 to update last ddl timestamp
+ alterTableAddColumn(conn1, tableName, "newCol1");
+
+ // reset the spy CQSI object
+ Mockito.reset(spyCqs2);
+
+ // select query from client-2 with old ddl timestamp works
+ // there should be one update to the client cache
+ query(conn2, tableName);
+ expectedNumCacheUpdates = 1;
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+
+ // select query from client-2 with latest ddl timestamp works
+ // there should be no more updates to client cache
+ query(conn2, tableName);
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+ }
+ }
+
+ /**
+     * Test DDL timestamp validation retry logic in case of any exception from the server other than StaleMetadataCacheException.
+ */
+ @Test
+    public void testSelectQueryServerSideExceptionInValidation() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+ ServerMetadataCache cache = null;
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ // create table and upsert using client-1
+ createTable(conn1, tableName, NEVER);
+ upsert(conn1, tableName);
+
+ // Instrument ServerMetadataCache to throw a SQLException once
+ cache = ServerMetadataCache.getInstance(config);
+ ServerMetadataCache spyCache = Mockito.spy(cache);
+            Mockito.doThrow(new SQLException("FAIL")).doCallRealMethod().when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+ ServerMetadataCache.setInstance(spyCache);
+
+ // query using client-2 should succeed
+ query(conn2, tableName);
+
+ // verify live region servers were refreshed
+            Mockito.verify(spyCqs2, Mockito.times(1)).refreshLiveRegionServers();
+ }
+ }
+
+ /**
+     * Test that a select query works when ddl timestamp validation with an old timestamp encounters an exception.
+     * Verify that the list of live region servers was refreshed when ddl timestamp validation is retried.
+     * Verify that the client cache was updated after encountering StaleMetadataCacheException.
+ */
+ @Test
+    public void testSelectQueryWithOldDDLTimestampWithExceptionRetry() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+ int expectedNumCacheUpdates;
+ ServerMetadataCache cache = null;
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ // create table and upsert using client-1
+ createTable(conn1, tableName, NEVER);
+ upsert(conn1, tableName);
+
+ // query using client-2 to populate cache
+ query(conn2, tableName);
+ expectedNumCacheUpdates = 1;
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+
+ // add column using client-1 to update last ddl timestamp
+ alterTableAddColumn(conn1, tableName, "newCol1");
+
+ // reset the spy CQSI object
+ Mockito.reset(spyCqs2);
+
+ // Instrument ServerMetadataCache to throw a SQLException once
+ cache = ServerMetadataCache.getInstance(config);
+ ServerMetadataCache spyCache = Mockito.spy(cache);
+            Mockito.doThrow(new SQLException("FAIL")).doCallRealMethod().when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+ ServerMetadataCache.setInstance(spyCache);
+
+ // query using client-2 should succeed, one cache update
+ query(conn2, tableName);
+ expectedNumCacheUpdates = 1;
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+
+ // verify live region servers were refreshed
+            Mockito.verify(spyCqs2, Mockito.times(1)).refreshLiveRegionServers();
+ }
+ }
+
+ /**
+     * Test that the select query fails in case DDL timestamp validation throws SQLException twice.
+ */
+ @Test
+ public void testSelectQueryFails() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+ ServerMetadataCache cache = null;
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ // create table and upsert using client-1
+ createTable(conn1, tableName, NEVER);
+ upsert(conn1, tableName);
+
+ // Instrument ServerMetadataCache to throw a SQLException twice
+ cache = ServerMetadataCache.getInstance(config);
+ ServerMetadataCache spyCache = Mockito.spy(cache);
+ SQLException e = new SQLException("FAIL");
+ Mockito.doThrow(e).when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+ ServerMetadataCache.setInstance(spyCache);
+
+ // query using client-2 should fail
+ query(conn2, tableName);
+ Assert.fail("Query should have thrown Exception");
+ }
+ catch (Exception e) {
+            Assert.assertTrue("SQLException was not thrown when last ddl timestamp validation encountered errors twice.",
+                    e instanceof SQLException);
+ }
+ }
+
+
+ /**
+     * Client-1 creates a table, 2 levels of views on it, and alters the first-level view.
+     * Client-2 queries the second-level view; verify that there were 3 cache updates in client-2,
+     * one each for the two views and the base table.
+ */
+ @Test
+ public void testSelectQueryOnView() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+ int expectedNumCacheUpdates;
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ // create table using client-1
+ createTable(conn1, tableName, NEVER);
+ upsert(conn1, tableName);
+
+            // create 2 levels of views using client-1
+ String view1 = generateUniqueName();
+ String view2 = generateUniqueName();
+ createView(conn1, tableName, view1);
+ createView(conn1, view1, view2);
+
+ // query second level view using client-2
+ query(conn2, view2);
+ expectedNumCacheUpdates = 3; // table, view1, view2
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+
+            // alter first level view using client-1 to update its last ddl timestamp
+ alterViewAddColumn(conn1, view1, "foo");
+
+ // reset the spy CQSI object
+ Mockito.reset(spyCqs2);
+
+ // query second level view
+ query(conn2, view2);
+
+            // verify there was a getTable RPC for the view and all its ancestors
+ Mockito.verify(spyCqs2, Mockito.times(1)).getTable(eq(null),
+                    any(byte[].class), eq(PVarchar.INSTANCE.toBytes(tableName)),
+ anyLong(), anyLong());
+ Mockito.verify(spyCqs2, Mockito.times(1)).getTable(eq(null),
+ any(byte[].class), eq(PVarchar.INSTANCE.toBytes(view1)),
+ anyLong(), anyLong());
+ Mockito.verify(spyCqs2, Mockito.times(1)).getTable(eq(null),
+ any(byte[].class), eq(PVarchar.INSTANCE.toBytes(view2)),
+ anyLong(), anyLong());
+
+            // verify that the view and all its ancestors were updated in the client cache
+ expectedNumCacheUpdates = 3; // table, view1, view2
+ Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+ .addTable(any(PTable.class), anyLong());
+ }
+ }
+
+ /**
+     * Verify that queries on system tables work, since we validate last ddl timestamps for them as well.
+ */
+ @Test
+ public void testSelectQueryOnSystemTables() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url = QueryUtil.getConnectionUrl(props, config, "client");
+        ConnectionQueryServices cqs = driver.getConnectionQueryServices(url, props);
+
+ try (Connection conn = cqs.connect(url, props)) {
+ query(conn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+ query(conn, PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);
+ query(conn, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);
+ query(conn, PhoenixDatabaseMetaData.SYSTEM_LOG_NAME);
+ }
+ }
+
+ /**
+ * Test query on index with stale last ddl timestamp.
+     * Client-1 creates a table and an index on it. Client-2 queries the table (with an index hint) to populate its cache.
+     * Client-1 alters a property on the index. Client-2 queries the table again.
+     * Verify that the second query works and the index metadata was updated in the client cache.
+ */
+ @Test
+ public void testSelectQueryAfterAlterIndex() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+ String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+ String tableName = generateUniqueName();
+ String indexName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+
+ try (Connection conn1 = spyCqs1.connect(url1, props);
+ Connection conn2 = spyCqs2.connect(url2, props)) {
+
+ //client-1 creates a table and an index on it
+ createTable(conn1, tableName, NEVER);
+ createIndex(conn1, tableName, indexName, "v1");
+ TestUtil.waitForIndexState(conn1, indexName, PIndexState.ACTIVE);
+
+            //client-2 populates its cache, 1 getTable&addTable call for the table
+ query(conn2, tableName);
+
+ //client-1 updates index property
+ alterIndexChangeStateToRebuild(conn1, tableName, indexName);
+
+ //client-2's query using the index should work
+            PhoenixStatement stmt = conn2.createStatement().unwrap(PhoenixStatement.class);
Review Comment:
I wanted to make sure the statement's query plan used the index; I have an assert on that following this. Everywhere else I use the queryWithIndex() method.
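For context, a minimal sketch of one way such an assert can be written (illustrative only, not the exact assert in this PR; the sql string and the use of EXPLAIN output here are assumptions):

    // Hypothetical illustration: run EXPLAIN on the hinted query and check that
    // the plan text mentions the index, i.e. the plan actually uses the index.
    String sql = "SELECT /*+ INDEX(" + tableName + " " + indexName + ") */ * FROM " + tableName;
    StringBuilder plan = new StringBuilder();
    try (ResultSet rs = conn2.createStatement().executeQuery("EXPLAIN " + sql)) {
        while (rs.next()) {
            plan.append(rs.getString(1)).append('\n');
        }
    }
    Assert.assertTrue("Expected the query plan to use index " + indexName,
            plan.toString().contains(indexName));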
> Create a new RPC to validate last ddl timestamp for read requests.
> ------------------------------------------------------------------
>
> Key: PHOENIX-7025
> URL: https://issues.apache.org/jira/browse/PHOENIX-7025
> Project: Phoenix
> Issue Type: Sub-task
> Reporter: Rushabh Shah
> Assignee: Palash Chauhan
> Priority: Major
>
> Introduce a new RPC request from the Phoenix client to any regionserver via
> PhoenixRegionServerEndpoint#validateLastDDLTimestamp. Since the last ddl
> timestamp cache is maintained by all the regionservers, the client can choose any
> regionserver at random. In the future, we can make this RPC more resilient by
> sending it to multiple regionservers simultaneously.
> If the Phoenix client encounters a StaleMetadataCacheException, it should invalidate the
> cache on the client side and retry the executeQuery method while fetching the
> updated metadata from the SYSCAT regionserver.
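For illustration, a minimal sketch of the validate-then-retry flow described above (the class and method names below are hypothetical placeholders, not the actual Phoenix client APIs):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.util.concurrent.Callable;

    // Hypothetical sketch: validateLastDdlTimestamps() stands in for the
    // PhoenixRegionServerEndpoint#validateLastDDLTimestamp RPC sent to a randomly
    // chosen regionserver, and invalidateAndRefetchMetadata() stands in for dropping
    // the stale client cache entries and re-reading metadata from SYSTEM.CATALOG.
    abstract class ValidatingQueryRunner {

        abstract void validateLastDdlTimestamps() throws SQLException;

        abstract void invalidateAndRefetchMetadata() throws SQLException;

        ResultSet runQuery(Callable<ResultSet> query) throws Exception {
            try {
                // happy path: cached last DDL timestamps are up to date
                validateLastDdlTimestamps();
            } catch (SQLException stale) {
                // stale metadata (or a transient validation failure): refresh the
                // client-side cache and validate once more; a second failure propagates
                invalidateAndRefetchMetadata();
                validateLastDdlTimestamps();
            }
            return query.call();
        }
    }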
--
This message was sent by Atlassian Jira
(v8.20.10#820010)