This is an automated email from the ASF dual-hosted git repository.

lokiore pushed a commit to branch PHOENIX-6978-feature
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/PHOENIX-6978-feature by this push:
     new 4133ec3ff9 PHOENIX-6996 :- Provide an upgrade path for Phoenix tables with HBase TTL to move the… (#1752)
4133ec3ff9 is described below

commit 4133ec3ff9dd47775513d7a0f833e31defb85edf
Author: Lokesh Khurana <[email protected]>
AuthorDate: Tue Feb 20 17:02:54 2024 -0800

    PHOENIX-6996 :- Provide an upgrade path for Phoenix tables with HBase TTL to move the… (#1752)
    
    * Provide an upgrade path for Phoenix tables with HBase TTL to move their TTL spec to SYSTEM.CATALOG
    
    * Adding a config to enable/disable ViewTTL feature
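    
    As a minimal illustrative sketch (not part of this commit), the new phoenix.view.ttl.enabled
    flag could be turned off from client properties roughly as follows. The property name, its
    default of true, and the VIEW_TTL_NOT_ENABLED (10959) error code come from this patch; the
    JDBC URL, the view name, and the assumption that connection properties reach the QueryServices
    configuration are hypothetical. In a real deployment the flag would more likely be set in
    hbase-site.xml on the cluster.
    
        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.util.Properties;
    
        public class ViewTtlFlagExample {
            public static void main(String[] args) throws Exception {
                Properties props = new Properties();
                // phoenix.view.ttl.enabled defaults to true (DEFAULT_PHOENIX_VIEW_TTL_ENABLED in this patch)
                props.setProperty("phoenix.view.ttl.enabled", "false");
                // URL and view name below are hypothetical placeholders
                try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props)) {
                    // With the flag off, CREATE VIEW ... TTL=... and ALTER VIEW ... SET TTL are
                    // rejected with SQLExceptionCode.VIEW_TTL_NOT_ENABLED (error code 10959).
                    conn.createStatement().execute("ALTER VIEW MY_VIEW SET TTL = 10000");
                }
            }
        }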
---
 .../phoenix/end2end/BackwardCompatibilityIT.java   |   3 -
 .../phoenix/end2end/MoveTTLDuringUpgradeIT.java    | 152 +++++++++++++++++++++
 .../phoenix/end2end/ViewTTLNotEnabledIT.java       | 136 ++++++++++--------
 .../apache/phoenix/exception/SQLExceptionCode.java |   2 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java      |   1 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  31 ++++-
 .../org/apache/phoenix/query/QueryServices.java    |   2 +
 .../apache/phoenix/query/QueryServicesOptions.java |   2 +
 .../org/apache/phoenix/schema/MetaDataClient.java  |  22 +++
 .../java/org/apache/phoenix/util/ScanUtil.java     |   8 ++
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 128 +++++++++++++++++
 11 files changed, 420 insertions(+), 67 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index 15f0d82f58..8e90b24c46 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -43,7 +43,6 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.util.Collection;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.phoenix.compat.hbase.CompatUtil;
 import org.apache.phoenix.coprocessor.SystemCatalogRegionObserver;
 import org.apache.phoenix.coprocessor.TaskMetaDataEndpoint;
 import org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.MavenCoordinates;
@@ -67,7 +65,6 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MoveTTLDuringUpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MoveTTLDuringUpgradeIT.java
new file mode 100644
index 0000000000..630a405198
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MoveTTLDuringUpgradeIT.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.UpgradeUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED;
+import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES;
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_TIMEOUT_DURING_UPGRADE_MS;
+import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull;
+import static org.junit.Assert.assertEquals;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class MoveTTLDuringUpgradeIT extends ParallelStatsDisabledIT {
+    @Test
+    public void testMoveHBaseLevelTTLToSYSCAT() throws Exception {
+        String schema = "S_" + generateUniqueName();
+        Admin admin = getUtility().getAdmin();
+        Map<String, Integer> tableTTLMap = createMultiHBaseTablesAndEquivalentInSYSCAT(schema, admin);
+        try (PhoenixConnection conn = getConnection(false, null).unwrap(PhoenixConnection.class);
+             Connection metaConn = getConnection(false, null, false, false)){
+            PhoenixConnection phxMetaConn = metaConn.unwrap(PhoenixConnection.class);
+            phxMetaConn.setRunningUpgrade(true);
+
+            Map<String, String> options = new HashMap<>();
+            options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
+            options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
+            String clientPort = getUtility().getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
+
+            String localQuorum = String.format("localhost:%s", clientPort);
+            options.put(QueryServices.ZOOKEEPER_QUORUM_ATTRIB, localQuorum);
+            options.put(QueryServices.ZOOKEEPER_PORT_ATTRIB, clientPort);
+            UpgradeUtil.moveHBaseLevelTTLToSYSCAT(phxMetaConn, options);
+
+            String sql = "SELECT TABLE_NAME, TTL FROM SYSTEM.CATALOG WHERE TABLE_SCHEM = '" + schema + "'";
+            ResultSet rs = conn.createStatement().executeQuery(sql);
+            while (rs.next()) {
+                String table = rs.getString(1);
+                int ttl = tableTTLMap.get(table);
+                //Check if TTL is moved to SYSCAT.
+                if (ttl != HConstants.FOREVER) {
+                    assertEquals(ttl, rs.getInt(2));
+                } else {
+                    assertEquals(ttl, TTL_NOT_DEFINED);
+                }
+                //Check if TTL at HBase level is reset.
+                TableDescriptor tableDescriptor = admin.getDescriptor(TableName.valueOf(SchemaUtil.getTableName(schema, table)));
+                ColumnFamilyDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
+                assertEquals(DEFAULT_TTL, columnFamilies[0].getTimeToLive());
+            }
+        }
+
+    }
+
+    private Map<String, Integer> createMultiHBaseTablesAndEquivalentInSYSCAT(String schema, Admin admin) throws SQLException, IOException {
+        String table = "";
+        int numOfTable = 20;
+        int randomTTL = 0;
+        Map<String, Integer> tableTTLMap = new HashMap<>();
+        for (int i = 0; i < numOfTable; i++ ) {
+            table = "T_" + generateUniqueName();
+            randomTTL = i%3 == 0 ? HConstants.FOREVER : 100 + (int)(Math.random() * 1000);
+            tableTTLMap.put(table, randomTTL);
+            TableName tableName = TableName.valueOf(SchemaUtil.getTableName(schema, table));
+            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+            builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(
+                    DEFAULT_COLUMN_FAMILY_BYTES).setTimeToLive(randomTTL).build());
+            admin.createTable(builder.build());
+            upsertIntoSYSCAT(schema, table);
+        }
+        return tableTTLMap;
+    }
+
+    private void upsertIntoSYSCAT(String schema, String table) throws SQLException {
+        try (PhoenixConnection connection = getConnection(false, null).unwrap(PhoenixConnection.class)){
+            String dml = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, TTL) VALUES (?,?,?,?)";
+            PreparedStatement statement = connection.prepareStatement(dml);
+            statement.setString(1, null);
+            statement.setString(2, schema);
+            statement.setString(3, table);
+            statement.setNull(4, Types.INTEGER);
+        }
+
+    }
+
+    private Connection getConnection(boolean tenantSpecific, String tenantId,
+                                     boolean isNamespaceMappingEnabled, boolean copyChildLinksDuringUpgrade)
+            throws SQLException {
+        if (tenantSpecific) {
+            checkNotNull(tenantId);
+            return createTenantConnection(tenantId);
+        }
+        Properties props = new Properties();
+        if (isNamespaceMappingEnabled){
+            props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+        }
+        if (copyChildLinksDuringUpgrade){
+            props.setProperty(QueryServices.MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED, "false");
+        }
+        return DriverManager.getConnection(getUrl(), props);
+    }
+    private Connection getConnection(boolean tenantSpecific, String tenantId) throws SQLException {
+        return getConnection(tenantSpecific, tenantId, false, false);
+    }
+
+    private Connection createTenantConnection(String tenantId) throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+        return DriverManager.getConnection(getUrl(), props);
+    }
+
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java
index 6349dd97a3..e1779e46c9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java
@@ -22,84 +22,106 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
+import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Map;
 import java.util.Properties;
 
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
-@Ignore
+import static org.junit.Assert.fail;
+
 @Category(NeedsOwnMiniClusterTest.class)
 public class ViewTTLNotEnabledIT extends ParallelStatsDisabledIT {
 
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+        props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
+        props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(false));
+        props.put(QueryServices.PHOENIX_VIEW_TTL_ENABLED, Boolean.toString(false));
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
+    @Test
+    public void testCreateViewWithTTLWithConfigFalse() throws Exception {
+        PhoenixTestBuilder.SchemaBuilder schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+        PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions globalViewOptions = PhoenixTestBuilder.SchemaBuilder.
+                GlobalViewOptions.withDefaults();
+        globalViewOptions.setTableProps("TTL = 10000");
+        try {
+            schemaBuilder.withTableOptions(PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults()).withGlobalViewOptions(
+                    globalViewOptions).build();
+            fail();
+        } catch (SQLException sqe) {
+            Assert.assertEquals(sqe.getErrorCode(), SQLExceptionCode.VIEW_TTL_NOT_ENABLED.getErrorCode());
+        }
+    }
+
     @Test
-    public void testPhoenixTTLNotEnabled() throws Exception {
-
-        // PHOENIX TTL is set in seconds (for e.g 10 secs)
-        long phoenixTTL = 10;
-        PhoenixTestBuilder.SchemaBuilder.TableOptions
-                tableOptions = PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults();
-        tableOptions.getTableColumns().clear();
-        tableOptions.getTableColumnTypes().clear();
-
-        PhoenixTestBuilder.SchemaBuilder.TenantViewOptions
-                tenantViewOptions = PhoenixTestBuilder.SchemaBuilder.TenantViewOptions.withDefaults();
-        tenantViewOptions.setTableProps(String.format("PHOENIX_TTL=%d", phoenixTTL));
-
-        // Define the test schema.
-        final PhoenixTestBuilder.SchemaBuilder schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(url);
-        schemaBuilder
-                .withTableOptions(tableOptions)
-                .withTenantViewOptions(tenantViewOptions)
-                .build();
-
-        String viewName = schemaBuilder.getEntityTenantViewName();
-
-        Properties props = new Properties();
-        String tenantConnectUrl =
-                url + ';' + TENANT_ID_ATTRIB + '=' + schemaBuilder.getDataOptions().getTenantId();
-
-        // Test the coproc is not registered
-        org.apache.hadoop.hbase.client.Connection hconn = getUtility().getConnection();
-        Admin admin = hconn.getAdmin();
-        HTableDescriptor tableDescriptor = admin.getTableDescriptor(
-                TableName.valueOf(schemaBuilder.getEntityTableName()));
-        Assert.assertFalse("Coprocessor " + PhoenixTTLRegionObserver.class.getName()
-                        + " should not have been added: ",
-                tableDescriptor.hasCoprocessor(PhoenixTTLRegionObserver.class.getName()));
-
-
-        // Test masking expired rows property are not set
-        try (Connection conn = DriverManager.getConnection(tenantConnectUrl, props);
-                final Statement statement = conn.createStatement()) {
-            conn.setAutoCommit(true);
-
-            final String stmtString = String.format("select * from  %s", viewName);
-            Preconditions.checkNotNull(stmtString);
-            final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
-            final QueryPlan queryPlan = pstmt.optimizeQuery(stmtString);
-
-            PhoenixResultSet
-                    rs = pstmt.newResultSet(queryPlan.iterator(), queryPlan.getProjector(), queryPlan.getContext());
-            Assert.assertFalse("Should not have any rows", rs.next());
-            Assert.assertEquals("Should have at least one element", 1, queryPlan.getScans().size());
-            Assert.assertEquals("PhoenixTTL should not be set",
-                    0, ScanUtil.getTTL(queryPlan.getScans().get(0).get(0)));
-            Assert.assertFalse("Masking attribute should not be set",
-                    ScanUtil.isMaskTTLExpiredRows(queryPlan.getScans().get(0).get(0)));
-            Assert.assertFalse("Delete Expired attribute should not set",
-                    ScanUtil.isDeleteTTLExpiredRows(queryPlan.getScans().get(0).get(0)));
+    public void testAlterViewWithTTLWithConfigFalse() throws Exception {
+        PhoenixTestBuilder.SchemaBuilder schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+        PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions globalViewOptions = PhoenixTestBuilder.SchemaBuilder.
+                GlobalViewOptions.withDefaults();
+        schemaBuilder.withTableOptions(PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults()).withGlobalViewOptions(
+                    globalViewOptions).build();
+
+        String dml = "ALTER VIEW " + schemaBuilder.getEntityGlobalViewName() + " SET TTL = 10000";
+        try (Connection connection = DriverManager.getConnection(getUrl())){
+            try {
+                connection.createStatement().execute(dml);
+                fail();
+            } catch (SQLException sqe) {
+                Assert.assertEquals(sqe.getErrorCode(), SQLExceptionCode.VIEW_TTL_NOT_ENABLED.getErrorCode());
+            }
+
+        }
+    }
+
+    @Test
+    public void testSettingTTLFromTableToViewWithConfigDisabled() throws Exception {
+        PhoenixTestBuilder.SchemaBuilder schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+        PhoenixTestBuilder.SchemaBuilder.TableOptions tableOptions = PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults();
+        tableOptions.setTableProps("TTL = 10000");
+        PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions globalViewOptions = PhoenixTestBuilder.SchemaBuilder.
+                GlobalViewOptions.withDefaults();
+        schemaBuilder.withTableOptions(tableOptions).withGlobalViewOptions(
+                globalViewOptions).build();
+
+        try (Connection connection = DriverManager.getConnection(getUrl())){
+
+            String dml = "ALTER TABLE " + schemaBuilder.getEntityTableName() + " SET TTL = NONE";
+            connection.createStatement().execute(dml);
+
+            // Clearing the cache, as metadata caching is not available for the TTL use case
+            connection.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
+
+            try {
+                dml = "ALTER VIEW " + schemaBuilder.getEntityGlobalViewName() + " SET TTL = 10000";
+                connection.createStatement().execute(dml);
+                fail();
+            } catch (SQLException sqe) {
+                Assert.assertEquals(sqe.getErrorCode(), SQLExceptionCode.VIEW_TTL_NOT_ENABLED.getErrorCode());
+            }
+
         }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 3a320cfb53..b8e7c2e87c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -368,6 +368,8 @@ public enum SQLExceptionCode {
 
     TTL_ALREADY_DEFINED_IN_HIERARCHY(10958, "44A40", TTL
             + " property is already defined in hierarchy for this entity"),
+    VIEW_TTL_NOT_ENABLED(10959,"44A41", TTL +
+            " property can not be set on views as phoenix.view.ttl.enabled is false"),
 
     /** Sequence related */
     SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 8632f056ba..7126b34574 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -139,6 +139,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final String TABLE_CAT = "TABLE_CAT";
     public static final String TABLE_CATALOG = "TABLE_CATALOG";
     public static final String TABLE_SCHEM = "TABLE_SCHEM";
+    public static final byte[] TABLE_SCHEM_BYTES = Bytes.toBytes(TABLE_SCHEM);
     public static final String LOGICAL_TABLE_NAME = "LOGICAL_TABLE_NAME";
     public static final String LOGICAL_PARENT_NAME = "LOGICAL_PARENT_NAME";
     public static final String REMARKS = "REMARKS";
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index eea4a0f72a..4eddce3800 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -75,6 +75,7 @@ import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCC
 import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS;
 import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_ENABLED;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS;
@@ -1096,9 +1097,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     private boolean isPhoenixTTLEnabled() {
-        boolean ttl = config.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED,
+         return config.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED,
                 QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED);
-        return ttl;
     }
 
 
@@ -3461,12 +3461,26 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     private void copyDataFromPhoenixTTLtoTTL(PhoenixConnection oldMetaConnection) throws IOException {
-        // Increase the timeouts so that the scan queries during Copy Data do not timeout
+        // Only copy values from the PHOENIX_TTL column to the TTL column if ViewTTL is enabled
+        if (oldMetaConnection.getQueryServices().getConfiguration().getBoolean(PHOENIX_VIEW_TTL_ENABLED,
+                DEFAULT_PHOENIX_VIEW_TTL_ENABLED)) {
+            // Increase the timeouts so that the scan queries during Copy Data do not time out
+            // on large SYSCAT Tables
+            Map<String, String> options = new HashMap<>();
+            options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
+            options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
+            copyTTLValuesFromPhoenixTTLColumnToTTLColumn(oldMetaConnection, options);
+        }
+
+    }
+
+    private void moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(PhoenixConnection oldMetaConnection) throws IOException {
+        // Increase the timeouts so that the scan queries during Copy Data do not time out
         // on large SYSCAT Tables
         Map<String, String> options = new HashMap<>();
         options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
         options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS));
-        copyTTLValuesFromPhoenixTTLColumnToTTLColumn(oldMetaConnection, options);
+        UpgradeUtil.moveHBaseLevelTTLToSYSCAT(oldMetaConnection, options);
     }
 
     // Available for testing
@@ -4212,9 +4226,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
                     MIN_SYSTEM_TABLE_TIMESTAMP_5_2_0,
                     PhoenixDatabaseMetaData.INDEX_WHERE + " " + PVarchar.INSTANCE.getSqlTypeName());
-            //Copy Data From PHOENIX_TTL column to TTL as PHOENIX_TTL column will be removed in
-            //later release.
-            copyDataFromPhoenixTTLtoTTL(metaConnection);
+            // Values in the PHOENIX_TTL column will not be used in future releases, as the PHOENIX_TTL column is deprecated
+            // and will be removed in a later release. copyDataFromPhoenixTTLtoTTL(metaConnection) could be used to copy them,
+            // but since that feature was never fully built we do not move the old values to the new column.
+
+            // Move TTL values stored in the HBase table descriptor to the SYSCAT TTL column.
+            moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(metaConnection);
             UpgradeUtil.bootstrapLastDDLTimestampForIndexes(metaConnection);
         }
         return metaConnection;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 66759b429f..fb1c0d839b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -361,6 +361,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String PHOENIX_SERVER_PAGE_SIZE_MS = "phoenix.server.page.size.ms";
     // Phoenix TTL implemented by CompactionScanner and TTLRegionScanner is enabled
     public static final String PHOENIX_TABLE_TTL_ENABLED = "phoenix.table.ttl.enabled";
+    // Property controlling whether TTL at the view level is enabled
+    public static final String PHOENIX_VIEW_TTL_ENABLED = "phoenix.view.ttl.enabled";
 
 
     // Before 4.15 when we created a view we included the parent table column metadata in the view
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index e4a1399e58..fc16d838ca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -420,6 +420,8 @@ public class QueryServicesOptions {
     public static final int DEFAULT_SCAN_PAGE_SIZE = 32768;
     public static final boolean DEFAULT_APPLY_TIME_ZONE_DISPLACMENT = false;
     public static final boolean DEFAULT_PHOENIX_TABLE_TTL_ENABLED = true;
+    public static final boolean DEFAULT_PHOENIX_VIEW_TTL_ENABLED = true;
+
     public static final int DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN = 5;
     public static final boolean DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX = true;
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index c99f3a0127..ed284fe5cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1130,6 +1130,12 @@ public class MetaDataClient {
                 QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED);
     }
 
+    private boolean isViewTTLEnabled() {
+        return connection.getQueryServices().getConfiguration().
+                getBoolean(QueryServices.PHOENIX_VIEW_TTL_ENABLED,
+                        QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED);
+    }
+
     public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt)
             throws SQLException {
         // Don't mistakenly commit pending rows
@@ -2183,6 +2189,14 @@ public class MetaDataClient {
                             .build()
                             .buildException();
                 }
+                if (!isViewTTLEnabled() && tableType == VIEW) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.
+                            VIEW_TTL_NOT_ENABLED)
+                            .setSchemaName(schemaName)
+                            .setTableName(tableName)
+                            .build()
+                            .buildException();
+                }
 
                 if (tableType != TABLE && (tableType != VIEW || viewType != UPDATABLE)) {
                     throw new SQLExceptionInfo.Builder(SQLExceptionCode.
@@ -5680,6 +5694,14 @@ public class MetaDataClient {
                         .build()
                         .buildException();
             }
+
+            if (!isViewTTLEnabled() && table.getType() == PTableType.VIEW) {
+                throw new SQLExceptionInfo.Builder(
+                        SQLExceptionCode.VIEW_TTL_NOT_ENABLED)
+                        .build()
+                        .buildException();
+            }
+
             if (table.getType() != PTableType.TABLE && (table.getType() != PTableType.VIEW ||
                     table.getViewType() != UPDATABLE)) {
                 throw new SQLExceptionInfo.Builder(
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index a088628b0f..f06c7d302d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -1221,6 +1221,14 @@ public class ScanUtil {
     public static void setScanAttributesForPhoenixTTL(Scan scan, PTable table,
             PhoenixConnection phoenixConnection) throws SQLException {
 
+        // If the entity is a view and phoenix.view.ttl.enabled is false, don't set the TTL scan attribute.
+        if ((table.getType() == PTableType.VIEW) && !phoenixConnection.getQueryServices().getConfiguration().getBoolean(
+                QueryServices.PHOENIX_VIEW_TTL_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED
+        )) {
+            return;
+        }
+
         // If Phoenix level TTL is not enabled OR is a system table then return.
         if (!isPhoenixTableTTLEnabled(phoenixConnection.getQueryServices().getConfiguration())) {
             if (SchemaUtil.isSystemTable(
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index c707b14343..bef7b0b8e5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -17,8 +17,14 @@
  */
 package org.apache.phoenix.util;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHOENIX_TTL_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES;
+import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME_BYTES;
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED;
 import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.CURRENT_CLIENT_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.getVersion;
@@ -80,6 +86,7 @@ import java.sql.Types;
 import java.text.Format;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -1470,6 +1477,127 @@ public class UpgradeUtil {
 
     }
 
+    public static void moveHBaseLevelTTLToSYSCAT(PhoenixConnection oldMetaConnection,
+                                                 Map<String, String> options) throws IOException {
+        long numOfTableThatHasTTLMoved = 0;
+        ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps();
+        TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps);
+
+        LOGGER.debug(String.format("SYSTEM CATALOG table use for copying TTL values: %s", sysCat.toString()));
+        Configuration conf = oldMetaConnection.getQueryServices().getConfiguration();
+        try (org.apache.hadoop.hbase.client.Connection moveTTLConnection =  getHBaseConnection(conf, options);
+             Table sysCatalogTable = moveTTLConnection.getTable(sysCat);
+             Admin admin = moveTTLConnection.getAdmin()) {
+            //Scan SYSCAT for all tables...
+                boolean pageMore = false;
+                byte[] lastRowKey = null;
+
+                do {
+                    Scan scan = new Scan();
+                    scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES);
+                    // Push down the filter to hbase to avoid transfer
+                    SingleColumnValueFilter tableFilter = new SingleColumnValueFilter(
+                            DEFAULT_COLUMN_FAMILY_BYTES,
+                            TABLE_TYPE_BYTES, CompareOperator.EQUAL,
+                            PTableType.TABLE.getSerializedValue().getBytes(StandardCharsets.UTF_8));
+
+                    tableFilter.setFilterIfMissing(true);
+                    // Limit number of records
+                    PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE);
+
+                    scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, tableFilter));
+                    if (pageMore) {
+                        scan.withStartRow(lastRowKey, false);
+                    }
+                    // Collect the row keys to process them in batch
+                    try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) {
+                        int count = 0;
+                        List<byte[]> rowKeys = new ArrayList<>();
+                        List<Put> puts = new ArrayList<>();
+                        for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
+                            count++;
+                            lastRowKey = rr.getRow();
+                            byte[] tmpKey = new byte[lastRowKey.length];
+                            System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length);
+                            rowKeys.add(tmpKey);
+                            String tableName = SchemaUtil.getTableName(rr.getValue(
+                                            DEFAULT_COLUMN_FAMILY_BYTES, TABLE_SCHEM_BYTES),
+                                    rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_NAME_BYTES));
+                            if (tableName == null || Arrays.equals(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES,
+                                    TABLE_SCHEM_BYTES), SYSTEM_SCHEMA_NAME_BYTES)) {
+                                // We do not support system table TTL through Phoenix TTL, and it will be moved to a
+                                // constant value in a future commit.
+                                continue;
+                            }
+                            TableDescriptor tableDesc = admin.getDescriptor(SchemaUtil.getPhysicalTableName(
+                                    tableName, readOnlyProps));
+                            int ttl = tableDesc.getColumnFamily(DEFAULT_COLUMN_FAMILY_BYTES).
+                                    getTimeToLive();
+                            if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
+                                // As we have TTL defined for this table, create a Put to set TTL.
+                                long rowTS = rr.rawCells()[0].getTimestamp();
+                                Put put = new Put(tmpKey);
+                                put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS,
+                                        EMPTY_COLUMN_VALUE_BYTES);
+                                put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES, rowTS,
+                                        PInteger.INSTANCE.toBytes(ttl));
+                                puts.add(put);
+
+                                // Set TTL to the default at the CF level when Phoenix-level TTL is enabled
+                                if (oldMetaConnection.getQueryServices().getConfiguration().getBoolean(
+                                        QueryServices.PHOENIX_TABLE_TTL_ENABLED, DEFAULT_PHOENIX_TABLE_TTL_ENABLED)) {
+                                    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
+                                            .newBuilder(DEFAULT_COLUMN_FAMILY_BYTES).setTimeToLive(
+                                                    ColumnFamilyDescriptorBuilder.DEFAULT_TTL).build();
+                                    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(
+                                            admin.getDescriptor(SchemaUtil.getPhysicalTableName(
+                                            tableName, readOnlyProps))).modifyColumnFamily(
+                                                    columnFamilyDescriptor).build();
+                                    admin.modifyTable(tableDescriptor);
+                                }
+                            }
+                        }
+
+                        if (!puts.isEmpty()) {
+                            Object[] putResults = new Object[puts.size()];
+                            try (Table moveTTLTable = moveTTLConnection.getTable(sysCat)) {
+                                // Process a batch of ttl values
+                                moveTTLTable.batch(puts, putResults);
+                                int numMoved = 0;
+                                for (Object putResult : putResults) {
+                                    if (java.util.Objects.nonNull(putResult)) {
+                                        numMoved++;
+                                    }
+                                }
+                                numOfTableThatHasTTLMoved += numMoved;
+                            } catch (Exception e) {
+                                LOGGER.error(String.format(
+                                        "Failed moving ttl value batch from ColumnDescriptor to TTL" +
+                                                " column on %s with Exception :",
+                                        SYSTEM_CATALOG_NAME), e);
+                            }
+                        }
+
+                        pageMore = count != 0;
+                        LOGGER.info(String.format("moveTTLValues From ColumnDescriptor to TTL Column is " +
+                                        "in progress => numOfTableHasTTLMoved: %d",
+                                numOfTableThatHasTTLMoved));
+
+                    }
+                } while (pageMore);
+            } catch (IOException ioe) {
+                LOGGER.error(String.format(
+                        "Failed moving ttl value batch from ColumnDescriptor to TTL" +
+                                " column in %s with Exception :",
+                        SYSTEM_CATALOG_NAME), ioe);
+                throw ioe;
+            }
+            LOGGER.info(String.format("Finished moving ttl value batch from ColumnDescriptor to TTL " +
+                            "column on %s ",
+                    SYSTEM_CATALOG_NAME));
+
+    }
+
     public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException {
         PhoenixConnection metaConn = null;
         boolean isMetaConnUsingQueryConn = true;

