[ https://issues.apache.org/jira/browse/PHOENIX-6186?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17234034#comment-17234034 ]

ASF GitHub Bot commented on PHOENIX-6186:
-----------------------------------------

ChinmaySKulkarni commented on a change in pull request #935:
URL: https://github.com/apache/phoenix/pull/935#discussion_r525452210



##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
##########
@@ -747,4 +640,83 @@ private void verifyExpectedCellValue(byte[] rowKey, byte[] syscatBytes,
             assertArrayEquals(expectedDateTypeBytes, CellUtil.cloneValue(cell));
         }
     }
+
+    @Test
+    public void testLastDDLTimestampBootstrap() throws Exception {
+        //Create a table, view, and index
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        String fullViewName = SchemaUtil.getTableName(schemaName, viewName);
+        try (Connection conn = getConnection(false, null)) {
+            conn.createStatement().execute(
+                "CREATE TABLE " + fullTableName
+                    + " (PK1 VARCHAR NOT NULL, PK2 VARCHAR, KV1 VARCHAR, KV2 VARCHAR CONSTRAINT " +
+                    "PK PRIMARY KEY(PK1, PK2)) ");
+            conn.createStatement().execute(
+                "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName);
+
+            //Now we null out any existing last ddl timestamps
+            nullDDLTimestamps(conn);
+            conn.commit();

Review comment:
       nit: We already have `conn.commit()` inside `nullDDLTimestamps()`.

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
##########
@@ -91,122 +100,6 @@
 @Category(NeedsOwnMiniClusterTest.class)
 public class UpgradeIT extends ParallelStatsDisabledIT {
 
-    @Test
-    public void testMapTableToNamespaceDuringUpgrade()
-            throws SQLException, IOException, IllegalArgumentException, InterruptedException {
-        String[] strings = new String[] { "a", "b", "c", "d" };
-
-        try (Connection conn = DriverManager.getConnection(getUrl())) {
-            String schemaName = "TEST";
-            String phoenixFullTableName = schemaName + "." + generateUniqueName();
-            String indexName = "IDX_" + generateUniqueName();
-            String localIndexName = "LIDX_" + generateUniqueName();
-
-            String viewName = "VIEW_" + generateUniqueName();
-            String viewIndexName = "VIDX_" + generateUniqueName();
-
-            String[] tableNames = new String[] { phoenixFullTableName, schemaName + "." + indexName,
-                    schemaName + "." + localIndexName, "diff." + viewName, "test." + viewName, viewName};
-            String[] viewIndexes = new String[] { "diff." + viewIndexName, "test." + viewIndexName };
-            conn.createStatement().execute("CREATE TABLE " + phoenixFullTableName
-                    + "(k VARCHAR PRIMARY KEY, v INTEGER, f INTEGER, g INTEGER NULL, h INTEGER NULL)");
-            PreparedStatement upsertStmt = conn
-                    .prepareStatement("UPSERT INTO " + phoenixFullTableName + " VALUES(?, ?, 0, 0, 0)");
-            int i = 1;
-            for (String str : strings) {
-                upsertStmt.setString(1, str);
-                upsertStmt.setInt(2, i++);
-                upsertStmt.execute();
-            }
-            conn.commit();
-            // creating local index
-            conn.createStatement()
-                    .execute("create local index " + localIndexName + " on " + phoenixFullTableName + "(K)");
-            // creating global index
-            conn.createStatement().execute("create index " + indexName + " on " + phoenixFullTableName + "(k)");
-            // creating view in schema 'diff'
-            conn.createStatement().execute("CREATE VIEW diff." + viewName + " (col VARCHAR) AS SELECT * FROM " + phoenixFullTableName);
-            // creating view in schema 'test'
-            conn.createStatement().execute("CREATE VIEW test." + viewName + " (col VARCHAR) AS SELECT * FROM " + phoenixFullTableName);
-            conn.createStatement().execute("CREATE VIEW " + viewName + "(col VARCHAR) AS SELECT * FROM " + phoenixFullTableName);
-            // Creating index on views
-            conn.createStatement().execute("create index " + viewIndexName + " on diff." + viewName + "(col)");
-            conn.createStatement().execute("create index " + viewIndexName + " on test." + viewName + "(col)");
-
-            // validate data
-            for (String tableName : tableNames) {
-                ResultSet rs = conn.createStatement().executeQuery("select * from " + tableName);
-                for (String str : strings) {
-                    assertTrue(rs.next());
-                    assertEquals(str, rs.getString(1));
-                }
-            }
-
-            // validate view Index data
-            for (String viewIndex : viewIndexes) {
-                ResultSet rs = conn.createStatement().executeQuery("select * from " + viewIndex);
-                for (String str : strings) {
-                    assertTrue(rs.next());
-                    assertEquals(str, rs.getString(2));
-                }
-            }
-
-            HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
-            assertTrue(admin.tableExists(phoenixFullTableName));
-            assertTrue(admin.tableExists(schemaName + QueryConstants.NAME_SEPARATOR + indexName));
-            assertTrue(admin.tableExists(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(phoenixFullTableName))));
-            Properties props = new Properties();
-            props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
-            props.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.toString(false));
-            admin.close();
-            PhoenixConnection phxConn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
-            UpgradeUtil.upgradeTable(phxConn, phoenixFullTableName);
-            phxConn.close();
-            props = new Properties();
-            phxConn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
-            // purge MetaDataCache except for system tables
-            phxConn.getMetaDataCache().pruneTables(new PMetaData.Pruner() {
-                @Override public boolean prune(PTable table) {
-                    return table.getType() != PTableType.SYSTEM;
-                }
-
-                @Override public boolean prune(PFunction function) {
-                    return false;
-                }
-            });
-            admin = phxConn.getQueryServices().getAdmin();
-            String hbaseTableName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(phoenixFullTableName), true)
-                    .getNameAsString();
-            assertTrue(admin.tableExists(hbaseTableName));
-            assertTrue(admin.tableExists(Bytes.toBytes(hbaseTableName)));
-            assertTrue(admin.tableExists(schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexName));
-            assertTrue(admin.tableExists(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(hbaseTableName))));
-            i = 0;
-            // validate data
-            for (String tableName : tableNames) {
-                ResultSet rs = phxConn.createStatement().executeQuery("select * from " + tableName);
-                for (String str : strings) {
-                    assertTrue(rs.next());
-                    assertEquals(str, rs.getString(1));
-                }
-            }
-            // validate view Index data
-            for (String viewIndex : viewIndexes) {
-                ResultSet rs = conn.createStatement().executeQuery("select * from " + viewIndex);
-                for (String str : strings) {
-                    assertTrue(rs.next());
-                    assertEquals(str, rs.getString(2));
-                }
-            }
-            PName tenantId = phxConn.getTenantId();
-            PName physicalName = PNameFactory.newName(hbaseTableName);
-            String newSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(physicalName, true);
-            String newSequenceName = MetaDataUtil.getViewIndexSequenceName(physicalName, tenantId, true);
-            verifySequenceValue(null, newSequenceName, newSchemaName, Short.MIN_VALUE + 3);
-            admin.close();
-        }
-    }
-
     @Test
     public void testMapMultiTenantTableToNamespaceDuringUpgrade() throws SQLException, SnapshotCreationException,

Review comment:
       Should we also move this test to `UpgradeNamespaceIT`?

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
##########
@@ -387,6 +388,48 @@ public void testViewUsesTableLocalIndex() throws Exception {
         }
     }
 
+    @Test
+    public void testCreateViewTimestamp() throws Exception {
+        String tenantId = null;
+        createViewTimestampHelper(tenantId);
+    }
+
+    @Test
+    public void testCreateTenantViewTimestamp() throws Exception {
+        createViewTimestampHelper(TENANT1);
+    }
+
+    private void createViewTimestampHelper(String tenantId) throws SQLException {
+        Properties props = new Properties();
+        if (tenantId != null) {
+            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+        } else {
+            tenantId = "";

Review comment:
       I think we can remove this `else` block since the reinitialized value 
for `tenantId` isn't being used anywhere 
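
For illustration, a minimal sketch of the simplification suggested above (the rest of the helper is assumed to stay as in the diff):

```java
// Hypothetical simplified form of the helper: tenantId only decides whether the
// connection property gets set, and is never reassigned.
private void createViewTimestampHelper(String tenantId) throws SQLException {
    Properties props = new Properties();
    if (tenantId != null) {
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
    }
    // ... remainder of the helper unchanged ...
}
```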

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
##########
@@ -1216,5 +1216,251 @@ public void testDroppingIndexedColDropsViewIndex() throws Exception {
             assertNull(results.next());
         }
     }
-    
+
+    @Test
+    public void testAddThenDropColumnTableDDLTimestamp() throws Exception {
+        Properties props = new Properties();
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint NOT NULL,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String columnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String columnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        long startTS = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute(tableDDL);
+            //first get the original DDL timestamp when we created the table
+            long tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                dataTableFullName, startTS,
+                conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            //now add a column and make sure the timestamp updates
+            conn.createStatement().execute(columnAddDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(columnDropDDL);
+            CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1 , conn);
+        }
+    }
+
+    @Test
+    public void testLastDDLTimestampForDivergedViews() throws Exception {
+        //Phoenix allows users to "drop" columns from views that are inherited from their ancestor
+        // views or tables. These columns are then excluded from the view schema, and the view is
+        // considered "diverged" from its parents, and so no longer inherit any additional schema
+        // changes that are applied to their ancestors. This test make sure that this behavior
+        // extends to DDL timestamp
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String divergeDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL2";
+        String viewColumnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String viewColumnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        String tableColumnAddDDL = "ALTER TABLE " + dataTableFullName + " ADD COL4 varchar" +
+            "(50) NULL";
+        String tableColumnDropDDL = "ALTER TABLE " + dataTableFullName + " DROP COLUMN COL4 ";
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            conn.createStatement().execute(tableDDL);
+            long tableDDLTimestamp = CreateTableIT.getLastDDLTimestamp(conn, dataTableFullName);
+            conn.createStatement().execute(viewDDL);
+            long viewDDLTimestamp = CreateTableIT.getLastDDLTimestamp(conn, viewFullName);
+            Thread.sleep(1);
+            conn.createStatement().execute(divergeDDL);
+            //verify table DDL timestamp DID NOT change
+            assertEquals(tableDDLTimestamp, CreateTableIT.getLastDDLTimestamp(conn, dataTableFullName));
+            //verify view DDL timestamp changed
+            viewDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, viewDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewColumnAddDDL);
+            //verify DDL timestamp changed because we added a column to the view
+            viewDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, viewDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewColumnDropDDL);
+            //verify DDL timestamp changed because we dropped a column from the view
+            viewDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, viewDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(tableColumnAddDDL);
+            //verify DDL timestamp DID NOT change because we added a column from the base table
+            assertEquals(viewDDLTimestamp, CreateTableIT.getLastDDLTimestamp(conn, viewFullName));
+            conn.createStatement().execute(tableColumnDropDDL);

Review comment:
       Column drop _should_ change the last DDL timestamp for child views as 
explained in my previous comment.
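
For illustration, a sketch of the extra assertion this comment implies, reusing the helpers already referenced in the quoted test (the `+ 1` lower bound mirrors the pattern used elsewhere in the test and is an assumption):

```java
// hypothetical follow-up: after dropping a column from the base table, the diverged
// view's last DDL timestamp should also move forward
conn.createStatement().execute(tableColumnDropDDL);
viewDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
    viewFullName, viewDDLTimestamp + 1, conn);
```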

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/AddColumnMutator.java
##########
@@ -399,6 +400,17 @@ public MetaDataMutationResult validateAndAddMetadata(PTable table, byte[][] rowK
                                 rowKeyMetaData[TABLE_NAME_INDEX])));
             }
         }
+        if (isAddingColumns) {
+            //We're changing the application-facing schema by adding a column, so update the DDL
+            // timestamp
+            long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+            if (MetaDataUtil.isTableQueryable(table.getType())) {
+                additionalTableMetadataMutations.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey,
+                    clientTimeStamp, serverTimestamp));
+            }
+            //we don't need to update the DDL timestamp for child views, because when we look up

Review comment:
       Just for clarity, can we mention that this logic only applies to 
non-diverged views?

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java
##########
@@ -654,10 +654,17 @@ public static PTable addDerivedColumnsFromParent(PTable view, PTable parentTable
         }
 
         long maxTableTimestamp = view.getTimeStamp();
+        long maxDDLTimestamp = view.getLastDDLTimestamp() != null ? view.getLastDDLTimestamp() : 0L;
         int numPKCols = view.getPKColumns().size();
-        // set the final table timestamp as the max timestamp of the view/view index or its
-        // ancestors
+        // set the final table timestamp and DDL timestamp as the respective max timestamps of the
+        // view/view index or its ancestors
         maxTableTimestamp = Math.max(maxTableTimestamp, parentTable.getTimeStamp());
+        //Diverged views no longer inherit ddl timestamps from their ancestors because they don't
+        // inherit column changes

Review comment:
       Except for when a column is dropped from some ancestor after the view 
diverged. In that case, they _do_ "inherit" that change, just like they would 
have before diverging.

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
##########
@@ -1216,5 +1216,251 @@ public void testDroppingIndexedColDropsViewIndex() throws Exception {
             assertNull(results.next());
         }
     }
-    
+
+    @Test
+    public void testAddThenDropColumnTableDDLTimestamp() throws Exception {
+        Properties props = new Properties();
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint NOT NULL,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String columnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String columnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        long startTS = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute(tableDDL);
+            //first get the original DDL timestamp when we created the table
+            long tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                dataTableFullName, startTS,
+                conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            //now add a column and make sure the timestamp updates
+            conn.createStatement().execute(columnAddDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(columnDropDDL);
+            CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1 , conn);
+        }
+    }
+
+    @Test
+    public void testLastDDLTimestampForDivergedViews() throws Exception {
+        //Phoenix allows users to "drop" columns from views that are inherited from their ancestor
+        // views or tables. These columns are then excluded from the view schema, and the view is
+        // considered "diverged" from its parents, and so no longer inherit any additional schema
+        // changes that are applied to their ancestors. This test make sure that this behavior
+        // extends to DDL timestamp
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String divergeDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL2";
+        String viewColumnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String viewColumnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";

Review comment:
       We should ideally drop a pre-existing column from the parent for this 
test rather than a column that was newly added to the parent itself.
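
For illustration, one way the setup could be adjusted along these lines (a sketch only, not the PR's code): declare the extra column on the base table up front, diverge the view by dropping COL2, and then drop the pre-existing column from the parent.

```java
// Hypothetical setup: COL3 exists on the base table from the start
String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
    + " %s ID char(1) NOT NULL,"
    + " COL1 integer NOT NULL,"
    + " COL2 bigint,"
    + " COL3 varchar(50),"
    + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1)"
    + " ) %s");
String divergeDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL2";
// drop a column that pre-existed on the parent, rather than one added to it later
String tableColumnDropDDL = "ALTER TABLE " + dataTableFullName + " DROP COLUMN COL3";
```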

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
##########
@@ -2846,21 +2872,25 @@ private MetaDataMutationResult mutateColumn(
                 separateLocalAndRemoteMutations(region, tableMetadata, localMutations,
                         remoteMutations);
                 if (!remoteMutations.isEmpty()) {
-                    // there should only be remote mutations if we are adding a column to a view
+                    // there should only be remote mutations if we are updating the last ddl

Review comment:
       This comment is no longer valid, right?

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
##########
@@ -387,6 +388,48 @@ public void testViewUsesTableLocalIndex() throws Exception {
         }
     }
 
+    @Test
+    public void testCreateViewTimestamp() throws Exception {
+        String tenantId = null;
+        createViewTimestampHelper(tenantId);

Review comment:
       nit: Don't need the `tenantId` variable here

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
##########
@@ -1216,5 +1216,251 @@ public void testDroppingIndexedColDropsViewIndex() throws Exception {
             assertNull(results.next());
         }
     }
-    
+
+    @Test
+    public void testAddThenDropColumnTableDDLTimestamp() throws Exception {
+        Properties props = new Properties();
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint NOT NULL,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String columnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String columnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        long startTS = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute(tableDDL);
+            //first get the original DDL timestamp when we created the table
+            long tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                dataTableFullName, startTS,
+                conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            //now add a column and make sure the timestamp updates
+            conn.createStatement().execute(columnAddDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(columnDropDDL);
+            CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1 , conn);
+        }
+    }
+
+    @Test
+    public void testLastDDLTimestampForDivergedViews() throws Exception {
+        //Phoenix allows users to "drop" columns from views that are inherited from their ancestor
+        // views or tables. These columns are then excluded from the view schema, and the view is
+        // considered "diverged" from its parents, and so no longer inherit any additional schema
+        // changes that are applied to their ancestors. This test make sure that this behavior

Review comment:
       This is not entirely true. I know this is frustrating (I was in the same 
boat), but in 4.15+ based on what I've gathered, here are the rules for column 
inheritance w.r.t. diverged and non-diverged views:
   
   ### Dropping columns from a parent
   - Any columns dropped from an ancestor that is a **physical base table** are 
"inherited" by its entire child view hierarchy. This is obvious since the 
physical column no longer exists and so any descendant view wouldn't be able to 
project anything for those dropped columns.
     - Since the shape of the child view changes in this case, we should 
probably use `max(view_DDL_ts, parent_DDL_ts_corresponding_to_col_drop)`. 
     - Not sure how easy of a change it would be to store this ts corresponding 
to column dropped from the parent.
   - Any columns dropped from an ancestor that is a **view** are "inherited" by 
its entire child view hierarchy **even if a child view has diverged**.
     - Same here
   
   ### Adding columns to a parent
   - Any columns added to an ancestor that is a **view or physical base table** 
are only inherited by its descendant views that have **not diverged**.
     - I think your changes already capture this.
   
   Overall, the rule seems to be:
   Once a view diverges from its parent, any columns added to an ancestor base 
table/view are no longer propagated to the view. A column dropped on an 
ancestor base table/view is, however, always propagated to the child view.
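
For illustration, the rule described above could be summarized like this (names are invented for the sketch; PTable does not currently expose a per-drop timestamp):

```java
// Sketch of the proposed rule: a diverged view ignores ancestor ADD COLUMN changes but
// still "inherits" ancestor DROP COLUMN changes, so its effective last-DDL timestamp
// would be computed differently depending on divergence.
static long effectiveLastDdlTimestamp(long viewDdlTs, long parentDdlTs,
        long parentLastColumnDropTs, boolean viewHasDiverged) {
    if (!viewHasDiverged) {
        // non-diverged views inherit every ancestor schema change
        return Math.max(viewDdlTs, parentDdlTs);
    }
    // diverged views only pick up ancestor column drops
    return Math.max(viewDdlTs, parentLastColumnDropTs);
}
```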

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DropColumnMutator.java
##########
@@ -268,6 +272,16 @@ public MetaDataMutationResult validateAndAddMetadata(PTable table,
             }
 
         }
+        //We're changing the application-facing schema by dropping a column, so update the DDL
+        // timestamp to current _server_ timestamp
+        if (MetaDataUtil.isTableQueryable(table.getType())) {
+            long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+            additionalTableMetaData.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey,
+                clientTimeStamp, serverTimestamp));
+        }
+        //we don't need to update the DDL timestamp for any child views we may have, because

Review comment:
       For clarity, can we mention that this logic also applies to diverged views?

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
##########
@@ -2846,21 +2872,25 @@ private MetaDataMutationResult mutateColumn(
                 separateLocalAndRemoteMutations(region, tableMetadata, localMutations,
                         remoteMutations);
                 if (!remoteMutations.isEmpty()) {
-                    // there should only be remote mutations if we are adding a column to a view
+                    // there should only be remote mutations if we are updating the last ddl
+                    // timestamp for child views, or we are adding a column to a view
                     // that uses encoded column qualifiers (the remote mutations are to update the
                     // encoded column qualifier counter on the parent table)
-                    if (mutator.getMutateColumnType() == ColumnMutator.MutateColumnType.ADD_COLUMN
+                    if (childViews.size() > 0 || ( mutator.getMutateColumnType() == ColumnMutator.MutateColumnType.ADD_COLUMN

Review comment:
       Shouldn't this be done even if `!remoteMutations.isEmpty()` is false, when the table/view has child views?
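
For illustration, the restructuring this question hints at might look roughly like the following (a sketch only; the placeholder comments stand in for the existing handling in MetaDataEndpointImpl):

```java
// Hypothetical restructuring: handle the child-view timestamp update whenever child
// views exist, independent of whether any remote mutations were produced.
if (!childViews.isEmpty()) {
    // ... update last DDL timestamps for child views ...
}
if (!remoteMutations.isEmpty()) {
    // ... existing handling for adds to views with encoded column qualifiers ...
}
```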

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
##########
@@ -1216,5 +1216,251 @@ public void testDroppingIndexedColDropsViewIndex() throws Exception {
             assertNull(results.next());
         }
     }
-    
+
+    @Test
+    public void testAddThenDropColumnTableDDLTimestamp() throws Exception {
+        Properties props = new Properties();
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint NOT NULL,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String columnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String columnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        long startTS = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute(tableDDL);
+            //first get the original DDL timestamp when we created the table
+            long tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                dataTableFullName, startTS,
+                conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(viewDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName, tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            //now add a column and make sure the timestamp updates
+            conn.createStatement().execute(columnAddDDL);
+            tableDDLTimestamp = CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1, conn);
+            Thread.sleep(1);
+            conn.createStatement().execute(columnDropDDL);
+            CreateTableIT.verifyLastDDLTimestamp(
+                viewFullName,
+                tableDDLTimestamp + 1 , conn);
+        }
+    }
+
+    @Test
+    public void testLastDDLTimestampForDivergedViews() throws Exception {
+        //Phoenix allows users to "drop" columns from views that are inherited from their ancestor
+        // views or tables. These columns are then excluded from the view schema, and the view is
+        // considered "diverged" from its parents, and so no longer inherit any additional schema
+        // changes that are applied to their ancestors. This test make sure that this behavior
+        // extends to DDL timestamp
+        String schemaName = SCHEMA1;
+        String dataTableName = "T_" + generateUniqueName();
+        String viewName = "V_" + generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+        String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
+
+        String tableDDL = generateDDL("CREATE TABLE IF NOT EXISTS " + dataTableFullName + " ("
+            + " %s ID char(1) NOT NULL,"
+            + " COL1 integer NOT NULL,"
+            + " COL2 bigint,"
+            + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1)"
+            + " ) %s");
+
+        String viewDDL = "CREATE VIEW " + viewFullName + " AS SELECT * FROM " + dataTableFullName;
+
+        String divergeDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL2";
+        String viewColumnAddDDL = "ALTER VIEW " + viewFullName + " ADD COL3 varchar(50) NULL ";
+        String viewColumnDropDDL = "ALTER VIEW " + viewFullName + " DROP COLUMN COL3 ";
+        String tableColumnAddDDL = "ALTER TABLE " + dataTableFullName + " ADD COL4 varchar" +
+            "(50) NULL";
+        String tableColumnDropDDL = "ALTER TABLE " + dataTableFullName + " DROP COLUMN COL4 ";

Review comment:
       Same here.

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ViewUtil.java
##########
@@ -654,10 +654,17 @@ public static PTable addDerivedColumnsFromParent(PTable view, PTable parentTable
         }
 
         long maxTableTimestamp = view.getTimeStamp();
+        long maxDDLTimestamp = view.getLastDDLTimestamp() != null ? view.getLastDDLTimestamp() : 0L;
         int numPKCols = view.getPKColumns().size();
-        // set the final table timestamp as the max timestamp of the view/view index or its
-        // ancestors
+        // set the final table timestamp and DDL timestamp as the respective max timestamps of the
+        // view/view index or its ancestors
         maxTableTimestamp = Math.max(maxTableTimestamp, parentTable.getTimeStamp());
+        //Diverged views no longer inherit ddl timestamps from their ancestors because they don't
+        // inherit column changes

Review comment:
       Maybe we need to introduce another field in PTable to explicitly store 
any drop column DDLs being issued to a table/view. Then we could use that to 
get the lastDDLTs of diverged views.
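
For illustration, a sketch of what this suggestion might look like (entirely hypothetical; neither the field nor the getter exists on PTable today):

```java
// Hypothetical addition to PTableImpl: remember when a column was last dropped,
// separately from the general last-DDL timestamp.
private final Long lastColumnDropTimestamp;

public Long getLastColumnDropTimestamp() {
    return lastColumnDropTimestamp;
}

// ViewUtil.addDerivedColumnsFromParent could then let even diverged views pick up
// ancestor column drops, e.g.:
// maxDDLTimestamp = Math.max(maxDDLTimestamp,
//     parentTable.getLastColumnDropTimestamp() != null
//         ? parentTable.getLastColumnDropTimestamp() : 0L);
```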

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
##########
@@ -107,6 +107,29 @@
             HColumnDescriptor.KEEP_DELETED_CELLS,
             HColumnDescriptor.REPLICATION_SCOPE);
 
+    public static Put getLastDDLTimestampUpdate(byte[] tableHeaderRowKey,
+                                                     long clientTimestamp,
+                                                     long lastDDLTimestamp) {
+        //use client timestamp as the timestamp of the Cell, to match the other Cells that might
+        // be created by this DDL. But the actual value will be a _server_ timestamp
+        Put p = new Put(tableHeaderRowKey, clientTimestamp);
+        byte[] lastDDLTimestampBytes = PLong.INSTANCE.toBytes(lastDDLTimestamp);
+        p.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP_BYTES, lastDDLTimestampBytes);
+        return p;
+    }
+
+    /**
+     * Checks if a table is meant to be queried directly (and hence is relevant to external
+     * systems tracking Phoenix schema)
+     * @param tableType
+     * @return True if a table or view, false otherwise (such as for an index, system table, or
+     * subquery)
+     */
+    public static boolean isTableQueryable(PTableType tableType) {

Review comment:
       I also think this name is a little confusing. Maybe something like `isTableTypeDirectlyQueried()` is more in line with what we mean.
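
For illustration, the rename being suggested, with a body assumed from the javadoc above (true for tables and views, false otherwise); this is a sketch, not the PR's actual implementation:

```java
/**
 * Checks if a table type is one that clients query directly (and hence is relevant
 * to external systems tracking Phoenix schema).
 */
public static boolean isTableTypeDirectlyQueried(PTableType tableType) {
    // assumed body based on the javadoc: only user tables and views are queried directly
    return tableType == PTableType.TABLE || tableType == PTableType.VIEW;
}
```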




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


> Store table metadata last modified timestamp in PTable / System.Catalog
> -----------------------------------------------------------------------
>
>                 Key: PHOENIX-6186
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-6186
>             Project: Phoenix
>          Issue Type: New Feature
>            Reporter: Geoffrey Jacoby
>            Assignee: Geoffrey Jacoby
>            Priority: Major
>             Fix For: 4.16.0
>
>         Attachments: PHOENIX-6186-4.x.patch
>
>
> There are many reasons why it's useful to know when a particular table's 
> metadata was last modified. It's helpful when solving cache coherency 
> problems, and also in order to interact with external schema registries which 
> may have multiple versions of a particular schema and require a timestamp to 
> resolve ambiguities. 
> This JIRA will add a last modified timestamp field to System.Catalog, to be 
> updated both when creating a table/view and also when adding or removing a 
> column. Changing purely internal Phoenix properties will not update the 
> timestamp. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
