This is an automated email from the ASF dual-hosted git repository.

jisaac pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new bdba490f43 PHOENIX-7507 Update ROW_KEY_MATCHER column type to be VARBINARY_ENCODED (#2060)
bdba490f43 is described below

commit bdba490f4397c186cf1442bf727b5a647f01e84a
Author: Jacob Isaac <[email protected]>
AuthorDate: Mon Feb 3 16:57:44 2025 -0800

    PHOENIX-7507 Update ROW_KEY_MATCHER column type to be VARBINARY_ENCODED (#2060)
---
 .../coprocessorclient/MetaDataProtocol.java        |   2 +
 .../org/apache/phoenix/query/QueryConstants.java   |   2 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |   2 +-
 .../java/org/apache/phoenix/schema/PTableImpl.java | 134 +++++-----
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  17 +-
 .../phoenix/end2end/BaseRowKeyMatcherTestIT.java   |  67 ++++-
 .../org/apache/phoenix/end2end/BaseViewTTLIT.java  | 280 ++++++++++++++++++++-
 .../java/org/apache/phoenix/end2end/ViewTTLIT.java |  15 +-
 .../end2end/ViewTTLWithLongViewIndexEnabledIT.java |   8 +-
 9 files changed, 431 insertions(+), 96 deletions(-)

diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java
index 46d7888beb..6a7a5b859e 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java
@@ -114,6 +114,8 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final int MIN_CLIENT_RETRY_INDEX_WRITES = VersionUtil.encodeVersion("4", "14", "0");
     public static final int MIN_TX_CLIENT_SIDE_MAINTENANCE = VersionUtil.encodeVersion("4", "14", "0");
     public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0");
+    // The minimum client version that allows VARBINARY_ENCODED columns
+    public static final int MIN_VERSION_ALLOW_VBE_COLUMNS = VersionUtil.encodeVersion("5", "3", "0");
     // Version below which we should turn off essential column family.
     public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7");
     /** Version below which we fall back on the generic KeyValueBuilder */
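
The new constant lets the server gate its wire format on the caller's version: VersionUtil.encodeVersion packs major/minor/patch into a single comparable int, so "is this client 5.3.0 or newer?" reduces to a numeric comparison. A minimal sketch of that check (the helper class below is illustrative and not part of this patch):

    import org.apache.phoenix.coprocessorclient.MetaDataProtocol;

    // Illustrative only: clients older than 5.3.0 cannot deserialize
    // VARBINARY_ENCODED columns, so the server needs a cheap version check
    // before deciding how to serialize SYSTEM table metadata.
    final class VbeCompatCheck {
        private VbeCompatCheck() { }

        static boolean clientSupportsVbeColumns(long clientVersion) {
            // Both sides use the same packed-int version encoding.
            return clientVersion >= MetaDataProtocol.MIN_VERSION_ALLOW_VBE_COLUMNS;
        }
    }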
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 14e9b05fff..d59249fb58 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -406,7 +406,7 @@ public interface QueryConstants {
             MAX_LOOKBACK_AGE + " BIGINT, \n" +
             CDC_INCLUDE_TABLE + " VARCHAR, \n" +
             TTL + " VARCHAR, \n" +
-            ROW_KEY_MATCHER + " VARBINARY, \n" +
+            ROW_KEY_MATCHER + " VARBINARY_ENCODED, \n" +
             // Column metadata (will be null for table row)
             DATA_TYPE + " INTEGER," +
             COLUMN_SIZE + " INTEGER," +
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 598bdf21f5..243b13ff78 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -3656,7 +3656,7 @@ public class MetaDataClient {
 
             if ((rowKeyMatcher == null) ||
                     Bytes.compareTo(rowKeyMatcher, HConstants.EMPTY_BYTE_ARRAY) == 0) {
-                tableUpsert.setNull(38, Types.VARBINARY);
+                tableUpsert.setNull(38, PDataType.VARBINARY_ENCODED_TYPE);
             } else {
                 tableUpsert.setBytes(38, rowKeyMatcher);
             }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 8caa0f72e6..832fa2b6d5 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -68,6 +68,7 @@ import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.coprocessor.generated.DynamicColumnMetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.PTableProtos;
+import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
 import org.apache.phoenix.exception.DataExceedsCapacityException;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
@@ -77,6 +78,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.ParseNode;
@@ -122,7 +124,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -130,72 +131,10 @@ import java.util.Map.Entry;
 import java.util.Set;
 import javax.annotation.Nonnull;
 
-import javax.annotation.Nonnull;
-
 import org.apache.phoenix.schema.types.PVarbinary;
-import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compile.ExpressionCompiler;
-import org.apache.phoenix.compile.FromCompiler;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.compile.StatementContext;
-import org.apache.phoenix.coprocessor.generated.DynamicColumnMetaDataProtos;
-import org.apache.phoenix.coprocessor.generated.PTableProtos;
-import org.apache.phoenix.exception.DataExceedsCapacityException;
-import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.expression.LiteralExpression;
-import org.apache.phoenix.expression.SingleCellConstructorExpression;
-import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
-import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.index.IndexMaintainer;
-import org.apache.phoenix.parse.ParseNode;
-import org.apache.phoenix.parse.SQLParser;
-import org.apache.phoenix.protobuf.ProtobufUtil;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
-import org.apache.phoenix.schema.transform.TransformMaintainer;
-import org.apache.phoenix.schema.types.PBinary;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PDouble;
-import org.apache.phoenix.schema.types.PFloat;
 import org.apache.phoenix.schema.types.PVarbinaryEncoded;
-import org.apache.phoenix.schema.types.PVarchar;
 
-import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.phoenix.thirdparty.com.google.common.base.Objects;
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.base.Strings;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSortedMap;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
-import org.apache.phoenix.transaction.TransactionFactory;
-import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.CDCUtil;
-import org.apache.phoenix.util.EncodedColumnsUtil;
-import org.apache.phoenix.util.MetaDataUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.SizedUtil;
-import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
 /**
  *
@@ -2218,6 +2157,10 @@ public class PTableImpl implements PTable {
     }
 
     public static PTableProtos.PTable toProto(PTable table) {
+        return toProto(table, MetaDataProtocol.MIN_VERSION_ALLOW_VBE_COLUMNS);
+    }
+
+    public static PTableProtos.PTable toProto(PTable table, long clientVersion) {
         PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder();
         if (table.getTenantId() != null) {
             builder.setTenantId(ByteStringer.wrap(table.getTenantId().getBytes()));
@@ -2259,17 +2202,32 @@ public class PTableImpl implements PTable {
         }
         List<PColumn> columns = table.getColumns();
         int columnSize = columns.size();
-        for (int i = offset; i < columnSize; i++) {
-            PColumn column = columns.get(i);
-            builder.addColumns(PColumnImpl.toProto(column));
+        // check whether we need the backward compatibility check.
+        boolean
+                checkBackwardCompatibility =
+                (clientVersion < MetaDataProtocol.MIN_VERSION_ALLOW_VBE_COLUMNS)
+                        && (table.getSchemaName().getString()
+                        .equalsIgnoreCase(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME));
+        if (checkBackwardCompatibility) {
+            for (int i = offset; i < columnSize; i++) {
+                PColumn column = columns.get(i);
+                builder.addColumns(PColumnImpl.toProto(getBackwardCompatibleColumn(
+                        column,
+                        table.getTableName().getString())));
+            }
+        } else {
+            for (int i = offset; i < columnSize; i++) {
+                PColumn column = columns.get(i);
+                builder.addColumns(PColumnImpl.toProto(column));
+            }
         }
         List<PTable> indexes = table.getIndexes();
         for (PTable curIndex : indexes) {
-            builder.addIndexes(toProto(curIndex));
+            builder.addIndexes(toProto(curIndex, clientVersion));
         }
         PTable transformingNewTable = table.getTransformingNewTable();
         if (transformingNewTable != null) {
-            builder.setTransformingNewTable(toProto(transformingNewTable));
+            builder.setTransformingNewTable(toProto(transformingNewTable, clientVersion));
         }
         builder.setIsImmutableRows(table.isImmutableRows());
         // TODO remove this field in 5.0 release
@@ -2516,6 +2474,46 @@ public class PTableImpl implements PTable {
         return ancestorLastDDLTimestampMap;
     }
 
+    // Helper method for creating backward compatible PColumn object for columns that introduced the
+    // VARBINARY_ENCODED field in release version 5.3.0.
+    // Without this adjustment the older clients would get the following exception -
+    // "Error: org.apache.phoenix.schema.IllegalDataException: java.sql.SQLException:
+    // ERROR 201 (22000): Illegal data. Unsupported sql type: VARBINARY_ENCODED"
+    // The following columns were introduced as part of the 5.3.0 release
+    // SYSTEM.CATALOG.ROW_KEY_MATCHER
+    // SYSTEM.CDC_STREAM.PARTITION_START_KEY
+    // SYSTEM.CDC_STREAM.PARTITION_END_KEY
+    private static PColumn getBackwardCompatibleColumn(PColumn column, String tableName) {
+
+                // SYSTEM.CATALOG.ROW_KEY_MATCHER
+        if ((tableName.equalsIgnoreCase(PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE)
+                && column.getName().getString().equalsIgnoreCase(PhoenixDatabaseMetaData.ROW_KEY_MATCHER))
+                // SYSTEM.CDC_STREAM.PARTITION_START_KEY
+                || (tableName.equalsIgnoreCase(PhoenixDatabaseMetaData.SYSTEM_CDC_STREAM_TABLE)
+                && column.getName().getString().equalsIgnoreCase(PhoenixDatabaseMetaData.PARTITION_START_KEY))
+                // SYSTEM.CDC_STREAM.PARTITION_END_KEY
+                || (tableName.equalsIgnoreCase(PhoenixDatabaseMetaData.SYSTEM_CDC_STREAM_TABLE)
+                && column.getName().getString().equalsIgnoreCase(PhoenixDatabaseMetaData.PARTITION_END_KEY))) {
+            return new PColumnImpl(column.getName(),
+                    column.getFamilyName(),
+                    PVarbinary.INSTANCE,
+                    column.getMaxLength(),
+                    column.getScale(),
+                    column.isNullable(),
+                    column.getPosition(),
+                    column.getSortOrder(),
+                    column.getArraySize(),
+                    column.getViewConstant(),
+                    column.isViewReferenced(),
+                    column.getExpressionStr(),
+                    column.isRowTimestamp(),
+                    column.isDynamic(),
+                    column.getColumnQualifierBytes(),
+                    column.getTimestamp());
+        }
+        return column;
+    }
+
     private void buildIndexWhereExpression(PhoenixConnection connection) throws SQLException {
         PhoenixPreparedStatement
                 pstmt =
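
With the two toProto overloads in place, the server can decide per response whether the SYSTEM columns listed above are advertised with their new VARBINARY_ENCODED type or downgraded to plain VARBINARY; only the advertised type changes, the stored bytes do not. A rough illustration of the gate, assuming systemCatalogTable is a PTable for SYSTEM.CATALOG obtained elsewhere:

    // Illustrative only: the two version values select the two serialization paths.
    int oldClient = VersionUtil.encodeVersion("5", "2", "0");   // pre-VBE client
    int newClient = VersionUtil.encodeVersion("5", "3", "0");   // VBE-aware client

    // Older client: ROW_KEY_MATCHER is rewritten to VARBINARY in the proto.
    PTableProtos.PTable forOldClient = PTableImpl.toProto(systemCatalogTable, oldClient);

    // 5.3.0+ client: the column keeps its VARBINARY_ENCODED type.
    PTableProtos.PTable forNewClient = PTableImpl.toProto(systemCatalogTable, newClient);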
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 2cb00501d8..49a55d78af 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -757,7 +757,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
             // the PTable of views and indexes on views might get updated because a column is added to one of
             // their parents (this won't change the timestamp)
             if (table.getType() != PTableType.TABLE || table.getTimeStamp() != tableTimeStamp) {
-                builder.setTable(PTableImpl.toProto(table));
+                builder.setTable(PTableImpl.toProto(table, request.getClientVersion()));
             }
             done.run(builder.build());
         } catch (Throwable t) {
@@ -973,11 +973,10 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
         PDataType dataType =
                 PDataType.fromTypeId(PInteger.INSTANCE.getCodec().decodeInt(
                         dataTypeKv.getValueArray(), dataTypeKv.getValueOffset(), SortOrder.getDefault()));
+
         if (maxLength == null && dataType == PBinary.INSTANCE) {
-            dataType = PVarbinary.INSTANCE;   // For
+            dataType = PVarbinary.INSTANCE;   // For Backward compatibility
         }
-        // backward
-        // compatibility.
         Cell sortOrderKv = colKeyValues[SORT_ORDER_INDEX];
         SortOrder sortOrder =
                 sortOrderKv == null ? SortOrder.getDefault() : SortOrder.fromSystemValue(PInteger.INSTANCE
@@ -1666,7 +1665,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                     || (oldTable != null &&
                     oldTable.getBucketNum() != null && oldTable.getBucketNum() > 0);
                 addColumnToTable(columnCellList, colName, famName, colKeyValues, columns,
-                    isSalted, baseColumnCount, isRegularView, columnTimestamp);
+                        isSalted, baseColumnCount, isRegularView, columnTimestamp);
             }
         }
         if (tableType == INDEX && ! isThisAViewIndex) {
@@ -2395,14 +2394,14 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                     if (!isTableDeleted(table)) {
                         builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
                         builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
-                        builder.setTable(PTableImpl.toProto(table));
+                        builder.setTable(PTableImpl.toProto(table, clientVersion));
                         done.run(builder.build());
                         return;
                     }
                 } else {
                     builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND);
                     builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
-                    builder.setTable(PTableImpl.toProto(table));
+                    builder.setTable(PTableImpl.toProto(table, clientVersion));
                     done.run(builder.build());
                     return;
                 }
@@ -2823,7 +2822,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                 PTable newTable = buildTable(tableKey, cacheKey, region,
                     clientTimeStamp, clientVersion);
                 if (newTable != null) {
-                    builder.setTable(PTableImpl.toProto(newTable));
+                    builder.setTable(PTableImpl.toProto(newTable, clientVersion));
                 }
 
                 done.run(builder.build());
@@ -4594,7 +4593,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                 builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
                 builder.setMutationTime(currentTime);
                 if (returnTable != null) {
-                    builder.setTable(PTableImpl.toProto(returnTable));
+                    builder.setTable(PTableImpl.toProto(returnTable, request.getClientVersion()));
                 }
                 done.run(builder.build());
                 return;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseRowKeyMatcherTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseRowKeyMatcherTestIT.java
index 4fcf9b276a..0ce628b034 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseRowKeyMatcherTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseRowKeyMatcherTestIT.java
@@ -58,6 +58,7 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
@@ -67,13 +68,17 @@ import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PSmallint;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarbinaryEncoded;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.ViewUtil;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -153,6 +158,12 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
         case Types.TIMESTAMP:
             pkTypeStr = "TIMESTAMP";
             break;
+        case Types.VARBINARY:
+            pkTypeStr = "VARBINARY";
+            break;
+        case PDataType.VARBINARY_ENCODED_TYPE:
+            pkTypeStr = "VARBINARY_ENCODED";
+            break;
         default:
             pkTypeStr = "VARCHAR(25)";
         }
@@ -183,6 +194,15 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
         case Types.TIMESTAMP:
             //pkTypeStr = "TIMESTAMP";
             return new Timestamp(System.currentTimeMillis() + rnd.nextInt(50000));
+        case Types.VARBINARY:
+            // pkTypeStr = "VARBINARY";
+        case PDataType.VARBINARY_ENCODED_TYPE:
+            // pkTypeStr = "VARBINARY_ENCODED";
+            byte[] varBytes = ByteUtil.concat(
+                    RandomStringUtils.randomAlphanumeric(25).getBytes(),
+                    Bytes.toBytes(rnd.nextInt(50000)),
+                    Bytes.toBytes(Math.floor(rnd.nextInt(50000) * rnd.nextDouble())));
+            return PVarbinary.INSTANCE.toStringLiteral(varBytes);
         default:
             // pkTypeStr = "VARCHAR(25)";
             return RandomStringUtils.randomAlphanumeric(25);
@@ -271,17 +291,21 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
                                 "CONSTRAINT pk PRIMARY KEY (ID1 %s, ID2 %s, ID3 %s, ROW_ID)) " +
                                 "AS SELECT * FROM %s WHERE KP = '%s'";
 
-                cstmt.execute(String.format(VIEW_TEMPLATE, globalViewName, pkType1Str, pkType2Str,
+                String globalViewSQL = String.format(VIEW_TEMPLATE, globalViewName, pkType1Str, pkType2Str,
                         pkType3Str, pkOrders[0].name(), pkOrders[1].name(), pkOrders[2].name(),
-                        baseTableName, partitionName));
+                        baseTableName, partitionName);
+                LOGGER.info("Created global view {}", globalViewSQL);
+                cstmt.execute(globalViewSQL);
                 if (hasGlobalViewIndexes) {
                     String indexNamePrefix = String.format("G%s", partition);
                     String
                             GLOBAL_INDEX_TEMPLATE =
                             "CREATE INDEX IF NOT EXISTS %s_COL2_INDEX ON %s (COL2) " +
                                     "INCLUDE(SYSTEM_MODSTAMP)";
-                    cstmt.execute(
-                            String.format(GLOBAL_INDEX_TEMPLATE, indexNamePrefix, globalViewName));
+                    String globalViewIndexSQL =
+                            String.format(GLOBAL_INDEX_TEMPLATE, indexNamePrefix, globalViewName);
+                    LOGGER.info("Created global view index {}", globalViewIndexSQL);
+                    cstmt.execute(globalViewIndexSQL);
                 }
 
                 return getRowKeyMatchersFromView(globalConnection.unwrap(PhoenixConnection.class),
@@ -320,12 +344,16 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
                                 "CONSTRAINT pk PRIMARY KEY (ZID)) " + "AS SELECT * FROM %s %s %s";
                 String VIEW_WO_PK_TEMPLATE = "CREATE VIEW IF NOT EXISTS %s AS SELECT * from %s %s";
                 if (extendPK) {
-                    cstmt.execute(
+                    String viewWithPKSQL =
                             String.format(VIEW_WITH_PK_TEMPLATE, tenantViewName, globalViewName,
-                                    getWhereClause(pkNames, pkTypes), tenantViewOptions));
+                                    getWhereClause(pkNames, pkTypes), tenantViewOptions);
+                    LOGGER.info("Created tenant view (WITH_PK) {}", viewWithPKSQL);
+                    cstmt.execute(viewWithPKSQL);
                 } else {
-                    cstmt.execute(String.format(VIEW_WO_PK_TEMPLATE, tenantViewName, globalViewName,
-                            tenantViewOptions));
+                    String viewWithoutPKSQL = String.format(VIEW_WO_PK_TEMPLATE, tenantViewName, globalViewName,
+                            tenantViewOptions);
+                    LOGGER.info("Created tenant view ((WO_PK)) {}", viewWithoutPKSQL);
+                    cstmt.execute(viewWithoutPKSQL);
                 }
                 return getRowKeyMatchersFromView(tenantConnection.unwrap(PhoenixConnection.class),
                         tenantViewName);
@@ -524,12 +552,14 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
                         view.getTableName().getString());
         byte[]
                 rowKeyMatcher1 =
-                WhereOptimizer.getRowKeyMatcher(viewStatementContext, tableName, viewStatementTable,
-                        whereExpression);
+                PVarbinaryEncoded.INSTANCE.toBytes(
+                        WhereOptimizer.getRowKeyMatcher(viewStatementContext, tableName,
+                                viewStatementTable, whereExpression));
         byte[]
                 rowKeyMatcher2 =
-                WhereOptimizer.getRowKeyMatcher(connection, tableName, viewStatementTable,
-                        viewColumnConstantsToBe, isViewColumnReferencedToBe);
+                PVarbinaryEncoded.INSTANCE.toBytes(
+                        WhereOptimizer.getRowKeyMatcher(connection, tableName, viewStatementTable,
+                                viewColumnConstantsToBe, isViewColumnReferencedToBe));
         LOGGER.debug(String.format(
                 "target-view-name = %s, physical = %s, stmt-table = %s\n, " +
                         "row-matcher-0 = %s (syscat)\n, row-matcher-1 = %s\n, row-matcher-2 = %s\n",
@@ -634,6 +664,17 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
                 builder.append(pkNames[b]).append(" = ")
                         .append(" TO_TIMESTAMP('2019-10-27T16:17:57+00:00') ");
                 break;
+            case Types.VARBINARY:
+                // pkTypeStr = "VARBINARY";
+            case PDataType.VARBINARY_ENCODED_TYPE:
+                // pkTypeStr = "VARBINARY_ENCODED";
+                byte[] varBytes = ByteUtil.concat(
+                        RandomStringUtils.randomAlphanumeric(25).getBytes(),
+                        Bytes.toBytes(rnd.nextInt(50000)),
+                        Bytes.toBytes(Math.floor(rnd.nextInt(50000) * rnd.nextDouble())));
+                builder.append(pkNames[b]).append(" = ")
+                        .append(PVarbinary.INSTANCE.toStringLiteral(varBytes));
+                break;
             default:
                 // pkTypeStr = "VARCHAR(25)";
                 builder.append(pkNames[b]).append("=").append("'")
@@ -755,6 +796,8 @@ public abstract class BaseRowKeyMatcherTestIT extends ParallelStatsDisabledIT {
         // Test Case 7: PK1 = Varchar, PK2 = Varchar, PK3 = Integer
         // last PK cannot be of variable length when creating a view on top of it
         testCases.add(new PDataType[] { PVarchar.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE });
+        // Test Case 8: PK1 = VARBINARY_ENCODED, PK2 = Varchar, PK3 = VARBINARY_ENCODED
+        testCases.add(new PDataType[] { PVarbinaryEncoded.INSTANCE, PVarchar.INSTANCE, PVarbinaryEncoded.INSTANCE });
 
         return testCases;
     }
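
The new VARBINARY and VARBINARY_ENCODED test cases build random byte[] primary key values and splice them into the generated SQL as literals via PVarbinary.INSTANCE.toStringLiteral. A small sketch of that pattern, assuming a binary-typed ID1 column (names are illustrative):

    // Illustrative only: render a random binary value as a literal Phoenix can
    // parse back, mirroring the test helpers above.
    Random rnd = new Random();
    byte[] pkValue = ByteUtil.concat(
            RandomStringUtils.randomAlphanumeric(25).getBytes(),
            Bytes.toBytes(rnd.nextInt(50000)));
    String predicate = "ID1 = " + PVarbinary.INSTANCE.toStringLiteral(pkValue);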
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewTTLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewTTLIT.java
index b206fba04c..58ef1441ba 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewTTLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewTTLIT.java
@@ -18,6 +18,7 @@
 
 package org.apache.phoenix.end2end;
 
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.filter.SubstringComparator;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.PhoenixTestBuilder;
@@ -59,14 +61,28 @@ import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PBinary;
+import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarbinaryEncoded;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.thirdparty.com.google.common.base.Joiner;
 import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.ManualEnvironmentEdge;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.After;
 import org.junit.Before;
@@ -81,6 +97,7 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -164,6 +181,111 @@ public abstract class BaseViewTTLIT extends ParallelStatsDisabledIT {
         }
     }
 
+    private SortOrder[][] getSortOrders() {
+        SortOrder[][]
+                sortOrders =
+                new SortOrder[][] {
+                        { SortOrder.ASC, SortOrder.ASC, SortOrder.ASC },
+                        { SortOrder.ASC, SortOrder.ASC, SortOrder.DESC },
+                        { SortOrder.ASC, SortOrder.DESC, SortOrder.ASC },
+                        { SortOrder.ASC, SortOrder.DESC, SortOrder.DESC },
+                        { SortOrder.DESC, SortOrder.ASC, SortOrder.ASC },
+                        { SortOrder.DESC, SortOrder.ASC, SortOrder.DESC },
+                        { SortOrder.DESC, SortOrder.DESC, SortOrder.ASC },
+                        { SortOrder.DESC, SortOrder.DESC, SortOrder.DESC }
+                };
+        return sortOrders;
+    }
+
+
+    private List<PDataType[]> getTestCases() {
+
+        List<PDataType[]> testCases = new ArrayList<>();
+        // Test Case 1: PK1 = Integer, PK2 = Integer, PK3 = Integer
+        testCases.add(new PDataType[] { PInteger.INSTANCE, PInteger.INSTANCE, PInteger.INSTANCE });
+        // Test Case 2: PK1 = Long, PK2 = Long, PK3 = Long
+        testCases.add(new PDataType[] { PLong.INSTANCE, PLong.INSTANCE, PLong.INSTANCE });
+        // Test Case 3: PK1 = Timestamp, PK2 = Timestamp, PK3 = Timestamp
+        testCases.add(
+                new PDataType[] { PTimestamp.INSTANCE, PTimestamp.INSTANCE, PTimestamp.INSTANCE });
+        // Test Case 4: PK1 = Char, PK2 = Char, PK3 = Char
+        testCases.add(new PDataType[] { PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE });
+        // Test Case 5: PK1 = Decimal, PK2 = Decimal, PK3 = Integer
+        // last PK cannot be of variable length when creating a view on top of it
+        testCases.add(new PDataType[] { PDecimal.INSTANCE, PDecimal.INSTANCE, PInteger.INSTANCE });
+        // Test Case 6: PK1 = Date, PK2 = Date, PK3 = Date
+        testCases.add(new PDataType[] { PDate.INSTANCE, PDate.INSTANCE, PDate.INSTANCE });
+        // Test Case 7: PK1 = Varchar, PK2 = Varchar, PK3 = Integer
+        // last PK cannot be of variable length when creating a view on top of it
+        testCases.add(new PDataType[] { PVarchar.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE });
+
+        // Test Case 8: PK1 = VARBINARY_ENCODED, PK2 = Varchar, PK3 = VARBINARY_ENCODED
+        testCases.add(new PDataType[] { PVarbinaryEncoded.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE });
+        return testCases;
+    }
+
+    private String getWhereClause(String[] pkNames, PDataType[] testPKTypes) {
+
+        StringBuilder builder = new StringBuilder("WHERE ");
+        Random rnd = new Random();
+
+        for (int b = 0; b < testPKTypes.length; b++) {
+            if (b > 0) builder.append(" AND ");
+            switch (testPKTypes[b].getSqlType()) {
+            case Types.VARCHAR: {
+                // pkTypeStr = "VARCHAR(25)";
+                builder.append(pkNames[b]).append(" = ").append("'")
+                        .append(RandomStringUtils.randomAlphanumeric(25)).append("'");
+                break;
+            }
+            case Types.CHAR: {
+                //pkTypeStr = "CHAR(15)";
+                builder.append(pkNames[b]).append(" = ").append("'")
+                        .append(RandomStringUtils.randomAlphanumeric(15)).append("'");
+                break;
+            }
+            case Types.DECIMAL:
+                //pkTypeStr = "DECIMAL(8,2)";
+                builder.append(pkNames[b]).append(" = ").append(rnd.nextDouble());
+                break;
+            case Types.INTEGER:
+                //pkTypeStr = "INTEGER";
+                builder.append(pkNames[b]).append(" = ").append(rnd.nextInt(500000));
+                break;
+            case Types.BIGINT:
+                //pkTypeStr = "BIGINT";
+                builder.append(pkNames[b]).append(" = ").append(rnd.nextLong());
+                break;
+            case Types.DATE:
+                //pkTypeStr = "DATE";
+                builder.append(pkNames[b]).append(" = ")
+                        .append(" TO_DATE('2022-03-21T15:03:57+00:00') ");
+                break;
+            case Types.TIMESTAMP:
+                //pkTypeStr = "TIMESTAMP";
+                builder.append(pkNames[b]).append(" = ")
+                        .append(" TO_TIMESTAMP('2019-10-27T16:17:57+00:00') ");
+                break;
+            case Types.VARBINARY:
+                // pkTypeStr = "VARBINARY";
+            case PDataType.VARBINARY_ENCODED_TYPE:
+                // pkTypeStr = "VARBINARY_ENCODED";
+                byte[] varBytes = ByteUtil.concat(
+                        RandomStringUtils.randomAlphanumeric(25).getBytes(),
+                        Bytes.toBytes(rnd.nextInt(50000)),
+                        Bytes.toBytes(Math.floor(rnd.nextInt(50000) * rnd.nextDouble())));
+                builder.append(pkNames[b]).append(" = ")
+                        .append(PVarbinary.INSTANCE.toStringLiteral(varBytes));
+                break;
+            default:
+                // pkTypeStr = "VARCHAR(25)";
+                builder.append(pkNames[b]).append("=").append("'")
+                        .append(RandomStringUtils.randomAlphanumeric(15)).append("'");
+            }
+        }
+        return builder.toString();
+    }
+
     private void clearCache(boolean globalFixNeeded, boolean tenantFixNeeded, List<String> allTenants)
             throws SQLException {
 
@@ -2347,6 +2469,162 @@ public abstract class BaseViewTTLIT extends ParallelStatsDisabledIT {
 
     }
 
+    protected void testMajorCompactTenantViewsWithVariousPKTypesAndSortOrder() throws Exception {
+        try {
+            List<PDataType[]> testCases = getTestCases();
+            SortOrder[][] sortOrders = getSortOrders();
+            for (PDataType[] aCase : testCases) {
+                for (SortOrder[] sortOrder : sortOrders) {
+                    runTenantViewsWithVariousPKTypes(aCase, sortOrder);
+                }
+            }
+        } catch (Exception e) {
+            LOGGER.info(LogUtil.getCallerStackTrace());
+            LOGGER.error(e.getMessage());
+        }
+
+    }
+
+    /**
+     * Test special case:
+     * Test with various PK Types and SortOrders
+     * This can occur when the TENANT_ID and global view PARTITION_KEY overlap.
+     * @throws Exception
+     */
+
+    private void runTenantViewsWithVariousPKTypes(PDataType[] pkDataTypes, SortOrder[] sortOrders) throws Exception {
+        // View TTL is set in seconds (for e.g 10 secs)
+        resetEnvironmentEdgeManager();
+        int viewTTL = VIEW_TTL_10_SECS;
+        // Define the test schema.
+        // 1. Table with columns => (ORG_ID, KP, COL1, COL2, COL3), PK => (ORG_ID, KP)
+        // 2. GlobalView with columns => (ID1, ID2, ID3, COL4, COL5, COL6), PK => (ID1, ID2, ID3)
+        // 3. Tenant with columns => (ZID, COL7, COL8, COL9), PK => (ZID)
+        final SchemaBuilder schemaBuilder = new SchemaBuilder(getUrl());
+
+        TableOptions
+                tableOptions = TableOptions.withDefaults();
+        tableOptions.setTableProps("");
+        tableOptions.setTableProps("COLUMN_ENCODED_BYTES=0,MULTI_TENANT=true,DEFAULT_COLUMN_FAMILY='0'");
+        tableOptions.setTablePKColumns(Arrays.asList("OID", "KP"));
+        tableOptions.setTablePKColumnTypes(Arrays.asList("CHAR(15)", "CHAR(3)"));
+
+        DataOptions dataOptions = DataOptions.withDefaults();
+        dataOptions.setTenantViewName("Z01");
+        dataOptions.setKeyPrefix("Z01");
+
+        GlobalViewOptions
+                globalViewOptions = GlobalViewOptions.withDefaults();
+        String[] globalViewPKNames = new String[] { "ID1", "ID2", "ID3" };
+        globalViewOptions.setGlobalViewPKColumns(asList(globalViewPKNames));
+        globalViewOptions.setGlobalViewPKColumnTypes(
+                asList(
+                        pkDataTypes[0].getSqlTypeName(),
+                        pkDataTypes[1].getSqlTypeName(),
+                        pkDataTypes[2].getSqlTypeName()
+                )
+        );
+        globalViewOptions.setGlobalViewPKColumnSort(asList(
+                sortOrders[0].name(),
+                sortOrders[1].name(),
+                sortOrders[2].name()
+        ));
+
+        TenantViewOptions
+                tenantViewOptions = TenantViewOptions.withDefaults();
+        tenantViewOptions.setTenantViewCondition(String.format(
+                "SELECT * FROM %s.%s %s",
+                dataOptions.getSchemaName(), dataOptions.getGlobalViewName(),
+                getWhereClause(globalViewPKNames, pkDataTypes)));
+        // View TTL is set to 10s => 10000 ms
+        tenantViewOptions.setTableProps(String.format("TTL=%d", viewTTL));
+
+        schemaBuilder.withTableOptions(tableOptions)
+                .withTenantViewOptions(tenantViewOptions);
+
+        schemaBuilder
+                .withDataOptions(dataOptions)
+                .withGlobalViewOptions(globalViewOptions)
+                .buildWithNewTenant();
+
+        // Define the test data.
+        DataSupplier
+                dataSupplier = new DataSupplier() {
+
+            @Override public List<Object> getValues(int rowIndex) {
+                Random rnd = new Random();
+                String zid = String.format(ZID_FMT, rowIndex);
+                String col1 = String.format(COL1_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col2 = String.format(COL2_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col3 = String.format(COL3_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col4 = String.format(COL4_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col5 = String.format(COL5_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col6 = String.format(COL6_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col7 = String.format(COL7_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col8 = String.format(COL8_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+                String col9 = String.format(COL9_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+
+                return Lists.newArrayList(
+                        new Object[] { col1, col2, col3, col4, col5, col6,
+                                zid, col7, col8, col9 });
+            }
+        };
+
+        long earliestTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        // Create a test data reader/writer for the above schema.
+        DataWriter
+                dataWriter = new BasicDataWriter();
+        DataReader
+                dataReader = new BasicDataReader();
+
+        List<String> columns =
+                Lists.newArrayList(
+                        "COL1", "COL2", "COL3", "COL4", "COL5",
+                        "COL6", "ZID", "COL7", "COL8", "COL9");
+        List<String> rowKeyColumns = Lists.newArrayList("ZID");
+        String tenantConnectUrl =
+                getUrl() + ';' + TENANT_ID_ATTRIB + '=' +
+                        schemaBuilder.getDataOptions().getTenantId();
+        try (Connection writeConnection = DriverManager
+                .getConnection(tenantConnectUrl)) {
+            writeConnection.setAutoCommit(true);
+            dataWriter.setConnection(writeConnection);
+            dataWriter.setDataSupplier(dataSupplier);
+            dataWriter.setUpsertColumns(columns);
+            dataWriter.setRowKeyColumns(rowKeyColumns);
+            dataWriter.setTargetEntity(schemaBuilder.getEntityTenantViewName());
+            org.apache.phoenix.thirdparty.com.google.common.collect.Table<String, String, Object>
+                    upsertedData =
+                    upsertData(dataWriter, DEFAULT_NUM_ROWS);
+
+            dataReader.setValidationColumns(columns);
+            dataReader.setRowKeyColumns(rowKeyColumns);
+            dataReader.setDML(String
+                    .format("SELECT %s from %s", Joiner.on(",").join(columns),
+                            schemaBuilder.getEntityTenantViewName()));
+            dataReader.setTargetEntity(schemaBuilder.getEntityTenantViewName());
+            long scnTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+
+            validateExpiredRowsAreNotReturnedUsingData(viewTTL, upsertedData,
+                    dataReader, schemaBuilder);
+        }
+
+
+
+        PTable table = schemaBuilder.getBaseTable();
+        // validate multi-tenanted base table
+        validateAfterMajorCompaction(
+                table.getSchemaName().toString(),
+                table.getTableName().toString(),
+                false,
+                earliestTimestamp,
+                viewTTL,
+                false,
+                0
+        );
+    }
+
+
     /**
      * Test special case:
      * When there are overlapping row key prefixes.
@@ -2354,7 +2632,7 @@ public abstract class BaseViewTTLIT extends ParallelStatsDisabledIT {
      * @throws Exception
      */
 
-    protected void testTenantViewsWIthOverlappingRowPrefixes() throws Exception {
+    protected void testTenantViewsWithOverlappingRowPrefixes() throws Exception {
         // View TTL is set in seconds (for e.g 10 secs)
         int viewTTL = VIEW_TTL_10_SECS;
         // Define the test schema.
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
index d3d04c63c6..617ca0bcce 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
@@ -56,9 +56,14 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PDecimal;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PVarbinaryEncoded;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.thirdparty.com.google.common.base.Joiner;
 import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
@@ -79,6 +84,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -1702,8 +1708,13 @@ public class ViewTTLIT extends BaseViewTTLIT {
     }
 
     @Test
-    public void testTenantViewsWIthOverlappingRowPrefixes() throws Exception {
-        super.testTenantViewsWIthOverlappingRowPrefixes();
+    public void testMajorCompactTenantViewsWithVariousPKTypesAndSortOrder() throws Exception {
+        super.testMajorCompactTenantViewsWithVariousPKTypesAndSortOrder();
+    }
+
+    @Test
+    public void testTenantViewsWithOverlappingRowPrefixes() throws Exception {
+        super.testTenantViewsWithOverlappingRowPrefixes();
     }
 
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLWithLongViewIndexEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLWithLongViewIndexEnabledIT.java
index 102ef9e368..88727ac6cc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLWithLongViewIndexEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLWithLongViewIndexEnabledIT.java
@@ -90,12 +90,16 @@ public class ViewTTLWithLongViewIndexEnabledIT extends BaseViewTTLIT {
         super.testMajorCompactWithVariousTenantIdTypesAndRegions(PLong.INSTANCE);
     }
     @Test
+    public void testMajorCompactTenantViewsWithVariousPKTypesAndSortOrder() throws Exception {
+        super.testMajorCompactTenantViewsWithVariousPKTypesAndSortOrder();
+    }
+    @Test
     public void testMajorCompactWhenTTLSetForSomeTenants() throws Exception {
         super.testMajorCompactWhenTTLSetForSomeTenants();
     }
     @Test
-    public void testTenantViewsWIthOverlappingRowPrefixes() throws Exception {
-        super.testTenantViewsWIthOverlappingRowPrefixes();
+    public void testTenantViewsWithOverlappingRowPrefixes() throws Exception {
+        super.testTenantViewsWithOverlappingRowPrefixes();
     }
     @Test
     public void testMajorCompactWithGlobalAndTenantViewHierarchy() throws Exception {

