[ https://issues.apache.org/jira/browse/PHOENIX-7025?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17771317#comment-17771317 ]

ASF GitHub Bot commented on PHOENIX-7025:
-----------------------------------------

shahrs87 commented on code in PR #1666:
URL: https://github.com/apache/phoenix/pull/1666#discussion_r1343403280


##########
phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java:
##########
@@ -419,16 +423,291 @@ public void testInvalidateCacheForBaseTableWithUpdateIndexStatement() throws Exc
         }
     }
 
+    /**
+     * Client-1 creates a table, upserts data and alters the table.
+     * Client-2 queries the table before and after the alter.
+     * Check queries work successfully in both cases and verify number of addTable invocations.
+     */
+    @Test
+    public void testSelectQueryWithOldDDLTimestamp() throws SQLException {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+        int expectedNumCacheUpdates;
+
+        try (Connection conn1 = spyCqs1.connect(url1, props);
+             Connection conn2 = spyCqs2.connect(url2, props)) {
+
+            // create table with UCF=never and upsert data using client-1
+            createTable(conn1, tableName, NEVER);
+            upsert(conn1, tableName);
+
+            // select query from client-2 works to populate client side metadata cache
+            // there should be 1 update to the client cache
+            query(conn2, tableName);
+            expectedNumCacheUpdates = 1;
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+
+            // add column using client-1 to update last ddl timestamp
+            alterTableAddColumn(conn1, tableName, "newCol1");
+
+            // invalidate region server cache
+            // TODO: remove this call after PHOENIX-6968 is committed.
+            ServerMetadataCache.resetCache();
+
+            // reset the spy CQSI object
+            Mockito.reset(spyCqs2);
+
+            // select query from client-2 with old ddl timestamp works
+            // there should be one update to the client cache
+            query(conn2, tableName);
+            expectedNumCacheUpdates = 1;
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+
+            // select query from client-2 with latest ddl timestamp works
+            // there should be no more updates to client cache
+            query(conn2, tableName);
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+        }
+    }
+
+    /**
+     * Test DDL timestamp validation retry logic in case of any exception
+     * from Server other than StaleMetadataCacheException.
+     */
+    @Test
+    public void testSelectQueryServerSideExceptionInValidation() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+        ServerMetadataCache cache = null;
+
+        try (Connection conn1 = spyCqs1.connect(url1, props);
+             Connection conn2 = spyCqs2.connect(url2, props)) {
+
+            // create table and upsert using client-1
+            createTable(conn1, tableName, NEVER);
+            upsert(conn1, tableName);
+
+            // Instrument ServerMetadataCache to throw a SQLException once
+            cache = ServerMetadataCache.getInstance(config);
+            ServerMetadataCache spyCache = Mockito.spy(cache);
+            Mockito.doThrow(new SQLException("FAIL")).doCallRealMethod().when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+            ServerMetadataCache.setInstance(spyCache);
+
+            // query using client-2 should succeed
+            query(conn2, tableName);
+
+            // verify live region servers were refreshed
+            Mockito.verify(spyCqs2, Mockito.times(1)).refreshLiveRegionServers();
+        }
+    }
+
+    /**
+     * Test select query with an old ddl timestamp when ddl timestamp validation encounters an exception.
+     */
+    @Test
+    public void testSelectQueryWithOldDDLTimestampWithExceptionRetry() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+        int expectedNumCacheUpdates;
+        ServerMetadataCache cache = null;
+
+        try (Connection conn1 = spyCqs1.connect(url1, props);
+             Connection conn2 = spyCqs2.connect(url2, props)) {
+
+            // create table and upsert using client-1
+            createTable(conn1, tableName, NEVER);
+            upsert(conn1, tableName);
+
+            // query using client-2 to populate cache
+            query(conn2, tableName);
+            expectedNumCacheUpdates = 1;
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+
+            // add column using client-1 to update last ddl timestamp
+            alterTableAddColumn(conn1, tableName, "newCol1");
+
+            // invalidate region server cache
+            // TODO: remove this call after PHOENIX-6968 is committed.
+            ServerMetadataCache.resetCache();
+
+            // reset the spy CQSI object
+            Mockito.reset(spyCqs2);
+
+            // Instrument ServerMetadataCache to throw a SQLException once
+            cache = ServerMetadataCache.getInstance(config);
+            ServerMetadataCache spyCache = Mockito.spy(cache);
+            Mockito.doThrow(new SQLException("FAIL")).doCallRealMethod().when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+            ServerMetadataCache.setInstance(spyCache);
+
+            // query using client-2 should succeed, one cache update
+            query(conn2, tableName);
+            expectedNumCacheUpdates = 1;
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+
+            // verify live region servers were refreshed
+            Mockito.verify(spyCqs2, Mockito.times(1)).refreshLiveRegionServers();
+        }
+    }
+
+    /**
+     * Test that the select query fails when DDL timestamp validation throws a SQLException twice.
+     */
+    @Test
+    public void testSelectQueryFails() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+        ServerMetadataCache cache = null;
+
+        try (Connection conn1 = spyCqs1.connect(url1, props);
+             Connection conn2 = spyCqs2.connect(url2, props)) {
+
+            // create table and upsert using client-1
+            createTable(conn1, tableName, NEVER);
+            upsert(conn1, tableName);
+
+            // Instrument ServerMetadataCache to throw a SQLException twice
+            cache = ServerMetadataCache.getInstance(config);
+            ServerMetadataCache spyCache = Mockito.spy(cache);
+            SQLException e = new SQLException("FAIL");
+            Mockito.doThrow(e).doThrow(e).when(spyCache)
+                    .getLastDDLTimestampForTable(any(), any(), eq(Bytes.toBytes(tableName)));
+            ServerMetadataCache.setInstance(spyCache);
+
+            // query using client-2 should fail
+            query(conn2, tableName);
+            Assert.fail("Query should have thrown Exception");
+        }
+        catch (Exception e) {
+            Assert.assertTrue("SQLException was not thrown when last ddl 
timestamp validation encountered errors twice.", e instanceof SQLException);
+        }
+    }
+
+    /**
+     * Client-1 creates a table, 2 levels of views on it, and alters the first level view.
+     * Client-2 queries the second level view; verify that there were 3 cache updates in client-2,
+     * one each for the two views and the base table.
+     */
+    @Test
+    public void testSelectQueryOnView() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        ConnectionQueryServices spyCqs1 = Mockito.spy(driver.getConnectionQueryServices(url1, props));
+        ConnectionQueryServices spyCqs2 = Mockito.spy(driver.getConnectionQueryServices(url2, props));
+        int expectedNumCacheUpdates;
+
+        try (Connection conn1 = spyCqs1.connect(url1, props);
+             Connection conn2 = spyCqs2.connect(url2, props)) {
+
+            // create table using client-1
+            createTable(conn1, tableName, NEVER);
+            upsert(conn1, tableName);
+
+            // create 2 levels of views using client-1
+            String view1 = generateUniqueName();
+            String view2 = generateUniqueName();
+            createView(conn1, tableName, view1);
+            createView(conn1, view1, view2);
+
+            // query second level view using client-2
+            query(conn2, view2);
+            expectedNumCacheUpdates = 3; // table, view1, view2
+            Mockito.verify(spyCqs2, Mockito.times(expectedNumCacheUpdates))
+                    .addTable(any(PTable.class), anyLong());
+
+            // alter first level view using client-1 to update its last ddl timestamp
+            alterViewAddColumn(conn1, view1, "foo");
+
+            // invalidate region server cache
+            // TODO: remove this call after PHOENIX-6968 is committed.
+            ServerMetadataCache.resetCache();
+
+            // reset the spy CQSI object
+            Mockito.reset(spyCqs2);

Review Comment:
   But then we won't be able to test which tables updateCache was called for. I think
using Mockito.reset is fine since it improves our test coverage.
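
   For reference, a minimal sketch of the reset-then-verify pattern being discussed,
reusing the spy, connection, and helper names from the diff above; the ArgumentCaptor
check on the added tables is an assumption for illustration, not part of the patch:

       // requires org.mockito.ArgumentCaptor in addition to the imports used in the test
       // Mockito.reset(spy) clears prior invocation counts, so a later verify(...)
       // only counts calls made after the reset.
       Mockito.reset(spyCqs2);

       // run the statement under test
       query(conn2, view2);

       // verify how many times the client-side cache was updated after the reset,
       // capturing the PTable arguments to see which tables were added
       ArgumentCaptor<PTable> captor = ArgumentCaptor.forClass(PTable.class);
       Mockito.verify(spyCqs2, Mockito.times(3)).addTable(captor.capture(), anyLong());

       // hypothetical follow-up check on the captured tables (assumed accessors)
       for (PTable added : captor.getAllValues()) {
           System.out.println(added.getTableName().getString());
       }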





> Create a new RPC to validate last ddl timestamp for read requests.
> ------------------------------------------------------------------
>
>                 Key: PHOENIX-7025
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-7025
>             Project: Phoenix
>          Issue Type: Sub-task
>            Reporter: Rushabh Shah
>            Assignee: Palash Chauhan
>            Priority: Major
>
> Introduce a new RPC request from the phoenix client to any region server via
> PhoenixRegionServerEndpoint#validateLastDDLTimestamp. Since the last ddl
> timestamp cache is maintained by all the regionservers, the client can choose any
> regionserver randomly. In the future, we can make this rpc more resilient by
> sending it to multiple regionservers simultaneously.
> If the phoenix client throws a StaleMetadataCacheException, then invalidate the
> cache on the client side and retry the executeQuery method while fetching the
> updated metadata from the SYSCAT regionserver.
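
A minimal sketch of the read-path flow described above; only StaleMetadataCacheException,
the validateLastDDLTimestamp RPC, and the idea of retrying executeQuery come from the
description, while every other name below is a placeholder rather than an actual Phoenix API:

    // Hypothetical client-side flow: validate last ddl timestamps against a random
    // live regionserver; on StaleMetadataCacheException, invalidate the client cache,
    // fetch fresh metadata from the SYSCAT regionserver, and retry once.
    ResultSet executeQueryWithValidation(String sql) throws SQLException {
        try {
            // any live regionserver works, since every regionserver maintains
            // the last ddl timestamp cache
            ServerName server = pickRandomLiveRegionServer();              // placeholder
            validateLastDDLTimestamp(server, tablesReferencedBy(sql));     // RPC from the description
            return runQuery(sql);                                          // placeholder
        } catch (StaleMetadataCacheException e) {
            invalidateClientMetadataCache(tablesReferencedBy(sql));        // placeholder
            fetchLatestMetadataFromSyscat(tablesReferencedBy(sql));        // placeholder
            return runQuery(sql);                                          // retry the query
        }
    }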



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
