Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 0af8b1e32 -> d56fd3c99


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 1634159..aa3e1a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -99,6 +99,9 @@ import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.TableInfo;
+import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
@@ -178,11 +181,6 @@ public class UpgradeUtil {
     private static final String DELETE_LINK = "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE
             + " WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
     
-    private static final String GET_VIEWS_QUERY = "SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME
-            + " FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE + " WHERE " + COLUMN_FAMILY + " = ? AND "
-            + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + " AND ( " + TABLE_TYPE + "=" + "'"
-            + PTableType.VIEW.getSerializedValue() + "' OR " + TABLE_TYPE + " IS NULL) ORDER BY "+TENANT_ID;
-    
     private UpgradeUtil() {
     }
 
@@ -1153,6 +1151,78 @@ public class UpgradeUtil {
         }
     }
     
+    /**
+     * Move child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK
+     * @param oldMetaConnection caller should take care of closing the passed connection appropriately
+     * @throws SQLException
+     */
+    public static void moveChildLinks(PhoenixConnection oldMetaConnection) throws SQLException {
+        PhoenixConnection metaConnection = null;
+        try {
+            // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
+            metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+            logger.info("Upgrading metadata to add parent to child links for views");
+            metaConnection.commit();
+            String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " +
+                                        "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " +
+                                        "FROM SYSTEM.CATALOG " +
+                                        "WHERE LINK_TYPE = 4";
+            metaConnection.createStatement().execute(createChildLink);
+            metaConnection.commit();
+            String deleteChildLink = "DELETE FROM SYSTEM.CATALOG WHERE LINK_TYPE = 4 ";
+            metaConnection.createStatement().execute(deleteChildLink);
+            metaConnection.commit();
+            metaConnection.getQueryServices().clearCache();
+        } finally {
+            if (metaConnection != null) {
+                metaConnection.close();
+            }
+        }
+    }
+
+    public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException {
+        // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
+        try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+                PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) {
+            logger.info("Upgrading metadata to add parent links for indexes on views");
+            String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = "
+                    + LinkType.INDEX_TABLE.getSerializedValue();
+            String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) ";
+            ResultSet rs = queryConn.createStatement().executeQuery(indexQuery);
+            String prevTenantId = null;
+            PhoenixConnection metaConn = queryConn;
+            Properties props = new Properties(queryConn.getClientInfo());
+            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
+            while (rs.next()) {
+                String tenantId = rs.getString("TENANT_ID");
+                if (prevTenantId != tenantId) {
+                    prevTenantId = tenantId;
+                    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+                    metaConn = new PhoenixConnection(oldMetaConnection, props);
+                }
+                String schemaName = rs.getString("TABLE_SCHEM");
+                String parentTableName = rs.getString("TABLE_NAME");
+                String fullParentTableName = SchemaUtil.getTableName(schemaName, parentTableName);
+                String indexName = rs.getString("COLUMN_FAMILY");
+                PTable table = PhoenixRuntime.getTable(metaConn, fullParentTableName);
+                if (table == null) {
+                    throw new TableNotFoundException(fullParentTableName);
+                }
+                if (table.getType().equals(PTableType.VIEW)) {
+                    PreparedStatement prepareStatement = upsertConn.prepareStatement(createViewIndexLink);
+                    prepareStatement.setString(1, tenantId);
+                    prepareStatement.setString(2, schemaName);
+                    prepareStatement.setString(3, indexName);
+                    prepareStatement.setString(4, parentTableName);
+                    prepareStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue());
+                    prepareStatement.execute();
+                    upsertConn.commit();
+                }
+            }
+            queryConn.getQueryServices().clearCache();
+        }
+    }
+
     private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
             String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
             throws SQLException {
@@ -1817,10 +1887,11 @@ public class UpgradeUtil {
                         .getTable(SchemaUtil
                                 .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, readOnlyProps)
                                 .getName());) {
-            String tableName = SchemaUtil.normalizeIdentifier(srcTable);
-            String schemaName = SchemaUtil.getSchemaNameFromFullName(tableName);
+            String fullTableName = SchemaUtil.normalizeIdentifier(srcTable);
+            String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
+            String tableName = SchemaUtil.getTableNameFromFullName(fullTableName);
             // Confirm table is not already upgraded
-            PTable table = PhoenixRuntime.getTable(conn, tableName);
+            PTable table = PhoenixRuntime.getTable(conn, fullTableName);
             
             // Upgrade is not required if schemaName is not present.
             if (schemaName.equals("") && !PTableType.VIEW
@@ -1834,21 +1905,38 @@ public class UpgradeUtil {
             String oldPhysicalName = table.getPhysicalName().getString();
            String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
                     SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
-            logger.info(String.format("Upgrading %s %s..", table.getType(), tableName));
+            logger.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
             logger.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
             logger.info(String.format("tenantId %s..", conn.getTenantId()));
+
+            TableViewFinderResult childViewsResult = new TableViewFinderResult();
+            try (Table childLinkTable =
+                    conn.getQueryServices()
+                            .getTable(SchemaUtil.getPhysicalName(
+                                PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, readOnlyProps)
+                                    .getName())) {
+                byte[] tenantId = conn.getTenantId() != null ? conn.getTenantId().getBytes() : null;
+                ViewFinder.findAllRelatives(childLinkTable, tenantId, schemaName.getBytes(),
+                    tableName.getBytes(), LinkType.CHILD_TABLE, childViewsResult);
+            }
+
             // Upgrade the data or main table
-            mapTableToNamespace(admin, metatable, tableName, newPhysicalTablename, readOnlyProps,
-                    PhoenixRuntime.getCurrentScn(readOnlyProps), tableName, table.getType(),conn.getTenantId());
+            mapTableToNamespace(admin, metatable, fullTableName, newPhysicalTablename, readOnlyProps,
+                    PhoenixRuntime.getCurrentScn(readOnlyProps), fullTableName, table.getType(),conn.getTenantId());
             // clear the cache and get new table
+            conn.removeTable(conn.getTenantId(), fullTableName,
+                table.getParentName() != null ? table.getParentName().getString() : null,
+                table.getTimeStamp());
+            byte[] tenantIdBytes = conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes();
             conn.getQueryServices().clearTableFromCache(
-                    conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+                    tenantIdBytes,
                     table.getSchemaName().getBytes(), table.getTableName().getBytes(),
                     PhoenixRuntime.getCurrentScn(readOnlyProps));
-            MetaDataMutationResult result = new MetaDataClient(conn).updateCache(conn.getTenantId(),schemaName,
-                    SchemaUtil.getTableNameFromFullName(tableName),true);
+            MetaDataMutationResult result =
+                    new MetaDataClient(conn).updateCache(conn.getTenantId(), schemaName, tableName,
+                        true);
             if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { throw new TableNotFoundException(
-              schemaName, tableName); }
+              schemaName, fullTableName); }
             table = result.getTable();
             
             // check whether table is properly upgraded before upgrading indexes
@@ -1898,13 +1986,12 @@ public class UpgradeUtil {
                         conn.commit();
                     }
                     conn.getQueryServices().clearTableFromCache(
-                            conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+                            tenantIdBytes,
                             index.getSchemaName().getBytes(), index.getTableName().getBytes(),
                             PhoenixRuntime.getCurrentScn(readOnlyProps));
                 }
                 updateIndexesSequenceIfPresent(conn, table);
                 conn.commit();
-
             } else {
                 throw new RuntimeException("Error: problem occurred during upgrade. Table is not upgraded successfully");
             }
@@ -1912,12 +1999,32 @@ public class UpgradeUtil {
                 logger.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
                 updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(),table.getTableName());
                 conn.commit();
-
+
+                // if the view is a first level child, then we need to create the PARENT_TABLE link
+                // that was overwritten by the PHYSICAL_TABLE link
+                if (table.getParentName().equals(table.getPhysicalName())) {
+                    logger.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
+                    // Add row linking view to its parent
+                    PreparedStatement linkStatement = conn.prepareStatement(MetaDataClient.CREATE_VIEW_LINK);
+                    linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes));
+                    linkStatement.setString(2, table.getSchemaName().getString());
+                    linkStatement.setString(3, table.getTableName().getString());
+                    linkStatement.setString(4, table.getParentName().getString());
+                    linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
+                    linkStatement.setString(6, null);
+                    linkStatement.execute();
+                    conn.commit();
+                }
+
                 conn.getQueryServices().clearTableFromCache(
-                    conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+                    tenantIdBytes,
                     table.getSchemaName().getBytes(), table.getTableName().getBytes(),
                     PhoenixRuntime.getCurrentScn(readOnlyProps));
             }
+            // Upgrade all child views
+            if (table.getType() == PTableType.TABLE) {
+                mapChildViewsToNamespace(conn.getURL(), conn.getClientInfo(), childViewsResult.getLinks());
+            }
         }
     }
 
@@ -1945,7 +2052,7 @@ public class UpgradeUtil {
     private static void updateLink(PhoenixConnection conn, String srcTableName,
             String destTableName, PName schemaName, PName tableName) throws SQLException {
         String updateLinkSql = String.format(UPDATE_LINK, destTableName);
-        boolean hasTenantId = conn.getTenantId() != null;
+        boolean hasTenantId = conn.getTenantId() != null && conn.getTenantId().getBytes().length!=0;
         if (hasTenantId) {
             updateLinkSql += " AND TENANT_ID  = ? ";
         }
@@ -1973,36 +2080,29 @@ public class UpgradeUtil {
         deleteLinkStatment.execute();
     }
     
-    public static void mapChildViewsToNamespace(PhoenixConnection conn, String table, Properties props)
+    private static void mapChildViewsToNamespace(String connUrl, Properties props, List<TableInfo> viewInfoList)
             throws SQLException, SnapshotCreationException, IllegalArgumentException, IOException,
             InterruptedException {
-        PreparedStatement preparedStatment = conn.prepareStatement(GET_VIEWS_QUERY);
-        preparedStatment.setString(1, SchemaUtil.normalizeIdentifier(table));
-        ResultSet rs = preparedStatment.executeQuery();
         String tenantId = null;
         String prevTenantId = null;
-        PhoenixConnection passedConn = conn;
-        while (rs.next()) {
-            tenantId = rs.getString(1);
+        PhoenixConnection conn = null;
+        for (TableInfo viewInfo : viewInfoList) {
+            tenantId = viewInfo.getTenantId()!=null ? Bytes.toString(viewInfo.getTenantId()) : null;
+            String viewName = SchemaUtil.getTableName(viewInfo.getSchemaName(), viewInfo.getTableName());
             if (prevTenantId != tenantId) {
                 if (tenantId != null) {
                     props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
                 } else {
                     props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
                 }
-                if (passedConn != conn) {
+                if (conn!=null)
                     conn.close();
-                }
-                conn = DriverManager.getConnection(conn.getURL(), props).unwrap(PhoenixConnection.class);
+                conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class);
             }
-            String viewName=SchemaUtil.getTableName(rs.getString(2), rs.getString(3));
             logger.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
             UpgradeUtil.upgradeTable(conn, viewName);
             prevTenantId = tenantId;
         }
-        if (passedConn != conn) {
-            conn.close();
-        }
     }
 
    public static final String getSysCatalogSnapshotName(long currentSystemTableTimestamp) {
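
Note: the two new helpers above are self-contained migrations. A minimal sketch of driving them during an upgrade follows; the call site shown here is hypothetical (in this change the upgrade path in the query services layer is what actually invokes them), but both methods are public and only need an open PhoenixConnection:

    // Hypothetical driver; assumes an already-open PhoenixConnection.
    // Both helpers wrap it in their own LATEST_TIMESTAMP connections, so the
    // caller keeps ownership of conn and closes it afterwards.
    public static void runChildLinkMigrations(PhoenixConnection conn) throws SQLException {
        // copy LINK_TYPE = 4 (parent-to-child) rows to SYSTEM.CHILD_LINK,
        // then delete them from SYSTEM.CATALOG
        UpgradeUtil.moveChildLinks(conn);
        // add VIEW_INDEX_PARENT_TABLE links for every index on a view
        UpgradeUtil.addViewIndexToParentLinks(conn);
    }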

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index b79cda2..458458c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1253,7 +1253,7 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         }
     }
     
-    
+
     @Test
     public void testDuplicatePKColumn() throws Exception {
         String ddl = "CREATE TABLE t (k1 VARCHAR, k1 VARCHAR CONSTRAINT pk PRIMARY KEY(k1))";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
index d88a915..2e881b8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
@@ -30,6 +30,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -254,7 +255,7 @@ public class CorrelatePlanTest {
             PName colName = PNameFactory.newName(name);
             columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY),
                     expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(),
-                    i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
+                    i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP));
         }
         try {
             PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME,
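
Note: the PColumnImpl change here is the same mechanical edit repeated in the test diffs that follow: the constructor gained a trailing column-timestamp argument. Condensed, the pattern is (arguments exactly as in these tests; only the final one is new):

    columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY),
            expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(),
            i, expr.getSortOrder(), null, null, false, name, false, false,
            colName.getBytes(), HConstants.LATEST_TIMESTAMP)); // timestamp argument is new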

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
index 017e6c8..6bf298e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
@@ -30,6 +30,7 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -176,7 +177,8 @@ public class LiteralResultIteratorPlanTest {
             PName colName = PNameFactory.newName(name);
             columns.add(new PColumnImpl(PNameFactory.newName(name),
                     PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(),
-                    expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
+                    expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes(),
+                HConstants.LATEST_TIMESTAMP));
         }
         try {
             PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index 2894e92..b956db0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -32,7 +32,7 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -122,9 +122,11 @@ public class UnnestArrayPlanTest {
         RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
         UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
         PName colName = PNameFactory.newName("ELEM");
-        PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes());
+        PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
+            HConstants.LATEST_TIMESTAMP);
         colName = PNameFactory.newName("IDX");
-        PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes()) : null;
+        PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
+            HConstants.LATEST_TIMESTAMP) : null;
         List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
         ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
         ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
index 2788235..0856e79 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
@@ -25,6 +25,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.schema.PName;
@@ -43,7 +44,7 @@ public class ColumnExpressionTest {
         int scale = 5;
         PName colName = PNameFactory.newName("c1");
         PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, scale,
-                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -64,7 +65,7 @@ public class ColumnExpressionTest {
         int maxLen = 30;
         PName colName = PNameFactory.newName("c1");
         PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PBinary.INSTANCE, maxLen, null,
-                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -85,7 +86,7 @@ public class ColumnExpressionTest {
         int scale = 5;
         PName colName = PNameFactory.newName("c1");
         PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PVarchar.INSTANCE, null, scale,
-                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+                true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);
@@ -105,7 +106,7 @@ public class ColumnExpressionTest {
     public void testSerializationWithNullScaleAndMaxLength() throws Exception {
         PName colName = PNameFactory.newName("c1");
         PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, null, null, true,
-                20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+                20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
         ColumnExpression colExp = new KeyValueColumnExpression(column);
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         DataOutputStream dOut = new DataOutputStream(baos);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 031d8bb..63555a7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -90,7 +90,12 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Types;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -112,11 +117,17 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
@@ -138,6 +149,7 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
@@ -145,6 +157,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -218,26 +231,26 @@ public abstract class BaseTest {
                 "    CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" +
                 ") ");
         builder.put(TABLE_WITH_ARRAY, "create table "
-                               + TABLE_WITH_ARRAY
-                               + "   (organization_id char(15) not null, \n"
-                               + "    entity_id char(15) not null,\n"
-                               + "    a_string_array varchar(100) array[],\n"
-                               + "    b_string varchar(100),\n"
-                               + "    a_integer integer,\n"
-                               + "    a_date date,\n"
-                               + "    a_time time,\n"
-                               + "    a_timestamp timestamp,\n"
-                               + "    x_decimal decimal(31,10),\n"
-                               + "    x_long_array bigint array[],\n"
-                               + "    x_integer integer,\n"
-                               + "    a_byte_array tinyint array[],\n"
-                               + "    a_short smallint,\n"
-                               + "    a_float float,\n"
-                               + "    a_double_array double array[],\n"
-                               + "    a_unsigned_float unsigned_float,\n"
-                               + "    a_unsigned_double unsigned_double \n"
-                               + "    CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n"
-                               + ")");
+                + TABLE_WITH_ARRAY
+                + "   (organization_id char(15) not null, \n"
+                + "    entity_id char(15) not null,\n"
+                + "    a_string_array varchar(100) array[],\n"
+                + "    b_string varchar(100),\n"
+                + "    a_integer integer,\n"
+                + "    a_date date,\n"
+                + "    a_time time,\n"
+                + "    a_timestamp timestamp,\n"
+                + "    x_decimal decimal(31,10),\n"
+                + "    x_long_array bigint array[],\n"
+                + "    x_integer integer,\n"
+                + "    a_byte_array tinyint array[],\n"
+                + "    a_short smallint,\n"
+                + "    a_float float,\n"
+                + "    a_double_array double array[],\n"
+                + "    a_unsigned_float unsigned_float,\n"
+                + "    a_unsigned_double unsigned_double \n"
+                + "    CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n"
+                + ")");
         builder.put(BTABLE_NAME,"create table " + BTABLE_NAME +
                 "   (a_string varchar not null, \n" +
                 "    a_id char(3) not null,\n" +
@@ -388,7 +401,7 @@ public abstract class BaseTest {
     protected static String url;
     protected static PhoenixTestDriver driver;
     protected static boolean clusterInitialized = false;
-    private static HBaseTestingUtility utility;
+    protected static HBaseTestingUtility utility;
     protected static final Configuration config = HBaseConfiguration.create();
 
     private static class TearDownMiniClusterThreadFactory implements ThreadFactory {
@@ -720,9 +733,9 @@ public abstract class BaseTest {
             throw new IllegalStateException("Used up all unique names");
         }
         TABLE_COUNTER.incrementAndGet();
-        return "T" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1);
+        return "N" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1);
     }
-
+    
     private static AtomicInteger SEQ_NAME_SUFFIX = new AtomicInteger(0);
     private static final int MAX_SEQ_SUFFIX_VALUE = 1000000;
 
@@ -872,12 +885,18 @@ public abstract class BaseTest {
             // Make sure all tables and views have been dropped
             props.remove(CURRENT_SCN_ATTRIB);
             try (Connection seeLatestConn = DriverManager.getConnection(url, props)) {
-               DatabaseMetaData dbmd = seeLatestConn.getMetaData();
-               ResultSet rs = dbmd.getTables(null, null, null, new String[]{PTableType.VIEW.toString(), PTableType.TABLE.toString()});
-               boolean hasTables = rs.next();
-               if (hasTables) {
-                       fail("The following tables are not deleted that should be:" + getTableNames(rs));
-               }
+                DatabaseMetaData dbmd = seeLatestConn.getMetaData();
+                ResultSet rs = dbmd.getTables(null, null, null, new String[]{PTableType.VIEW.toString(), PTableType.TABLE.toString()});
+                while (rs.next()) {
+                    String fullTableName = SchemaUtil.getEscapedTableName(
+                            rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM),
+                            rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+                    try {
+                        PhoenixRuntime.getTable(conn, fullTableName);
+                        fail("The following tables are not deleted that should be:" + getTableNames(rs));
+                    } catch (TableNotFoundException e) {
+                    }
+                }
             }
         }
         finally {
@@ -926,12 +945,12 @@ public abstract class BaseTest {
     }
     
     private static String getTableNames(ResultSet rs) throws SQLException {
-       StringBuilder buf = new StringBuilder();
-       do {
-               buf.append(" ");
-               buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)));
-       } while (rs.next());
-       return buf.toString();
+        StringBuilder buf = new StringBuilder();
+        do {
+            buf.append(" ");
+            buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)));
+        } while (rs.next());
+        return buf.toString();
     }
 
     private static String getSchemaNames(ResultSet rs) throws SQLException {
@@ -1281,10 +1300,10 @@ public abstract class BaseTest {
     }
     
     private static String initEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
-       if (tableName == null) {
-               tableName = generateUniqueName();
-       }
-       
+        if (tableName == null) {
+            tableName = generateUniqueName();
+        }
+        
         if (ts == null) {
             ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, null);
         } else {
@@ -1391,10 +1410,10 @@ public abstract class BaseTest {
     }
     
     protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
-       if (tableName == null) {
-               tableName = generateUniqueName();
-       }
-       
+        if (tableName == null) {
+            tableName = generateUniqueName();
+        }
+        
         if (ts == null) {
             ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, null);
         } else {
@@ -1613,42 +1632,42 @@ public abstract class BaseTest {
     }
     
     public static void upsertRows(Connection conn, String fullTableName, int numRows) throws SQLException {
-       for (int i=1; i<=numRows; ++i) {
-               upsertRow(conn, fullTableName, i, false);
-       }
+        for (int i=1; i<=numRows; ++i) {
+            upsertRow(conn, fullTableName, i, false);
+        }
     }
 
     public static void upsertRow(Connection conn, String fullTableName, int index, boolean firstRowInBatch) throws SQLException {
-       String upsert = "UPSERT INTO " + fullTableName
+        String upsert = "UPSERT INTO " + fullTableName
                 + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
-               PreparedStatement stmt = conn.prepareStatement(upsert);
-               stmt.setString(1, firstRowInBatch ? "firstRowInBatch_" : "" + "varchar"+index);
-               stmt.setString(2, "char"+index);
-               stmt.setInt(3, index);
-               stmt.setLong(4, index);
-               stmt.setBigDecimal(5, new BigDecimal(index));
-               Date date = DateUtil.parseDate("2015-01-01 00:00:00");
-               stmt.setDate(6, date);
-               stmt.setString(7, "varchar_a");
-               stmt.setString(8, "chara");
-               stmt.setInt(9, index+1);
-               stmt.setLong(10, index+1);
-               stmt.setBigDecimal(11, new BigDecimal(index+1));
-               stmt.setDate(12, date);
-               stmt.setString(13, "varchar_b");
-               stmt.setString(14, "charb");
-               stmt.setInt(15, index+2);
-               stmt.setLong(16, index+2);
-               stmt.setBigDecimal(17, new BigDecimal(index+2));
-               stmt.setDate(18, date);
-               stmt.executeUpdate();
-       }
+        PreparedStatement stmt = conn.prepareStatement(upsert);
+        stmt.setString(1, firstRowInBatch ? "firstRowInBatch_" : "" + "varchar"+index);
+        stmt.setString(2, "char"+index);
+        stmt.setInt(3, index);
+        stmt.setLong(4, index);
+        stmt.setBigDecimal(5, new BigDecimal(index));
+        Date date = DateUtil.parseDate("2015-01-01 00:00:00");
+        stmt.setDate(6, date);
+        stmt.setString(7, "varchar_a");
+        stmt.setString(8, "chara");
+        stmt.setInt(9, index+1);
+        stmt.setLong(10, index+1);
+        stmt.setBigDecimal(11, new BigDecimal(index+1));
+        stmt.setDate(12, date);
+        stmt.setString(13, "varchar_b");
+        stmt.setString(14, "charb");
+        stmt.setInt(15, index+2);
+        stmt.setLong(16, index+2);
+        stmt.setBigDecimal(17, new BigDecimal(index+2));
+        stmt.setDate(18, date);
+        stmt.executeUpdate();
+    }
 
     // Populate the test table with data.
     public static void populateTestTable(String fullTableName) throws SQLException {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-               upsertRows(conn, fullTableName, 3);
+            upsertRows(conn, fullTableName, 3);
             conn.commit();
         }
     }
@@ -1758,4 +1777,144 @@ public abstract class BaseTest {
         }
         phxConn.close();
     }
+    
+
+    /**
+     *  Split SYSTEM.CATALOG at the given split point 
+     */
+    protected static void splitRegion(byte[] splitPoint) throws SQLException, IOException, InterruptedException {
+        Admin admin =
+                driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+        admin.split(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME, splitPoint);
+        // make sure the split finishes (there's no synchronous splitting before HBase 2.x)
+        admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+        admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+    }
+    
+    /**
+     * Returns true if the region contains at least one of the metadata rows we are interested in
+     */
+    protected static boolean regionContainsMetadataRows(HRegionInfo regionInfo,
+            List<byte[]> metadataRowKeys) {
+        for (byte[] rowKey : metadataRowKeys) {
+            if (regionInfo.containsRow(rowKey)) {
+                return true;
+            }
+        }
+        return false;
+    }
+    
+    /**
+     * Splits SYSTEM.CATALOG into multiple regions based on the table or view names passed in.
+     * Metadata for each table or view is moved to a separate region.
+     * @param tenantToTableAndViewMap map from tenant to tables and views owned by the tenant
+     */
+    protected static void splitSystemCatalog(Map<String, List<String>> tenantToTableAndViewMap) throws Exception {
+        List<byte[]> splitPoints = Lists.newArrayListWithExpectedSize(5);
+        // add the row keys of the table or view metadata rows
+        Set<String> schemaNameSet = Sets.newHashSetWithExpectedSize(15);
+        for (Entry<String, List<String>> entrySet : tenantToTableAndViewMap.entrySet()) {
+            String tenantId = entrySet.getKey();
+            for (String fullName : entrySet.getValue()) {
+                String schemaName = SchemaUtil.getSchemaNameFromFullName(fullName);
+                // we don't allow SYSTEM.CATALOG to split within a schema, so to ensure each table
+                // or view is on a separate region they need to have a unique tenant and schema name
+                assertTrue("Schema names of tables/view must be unique ", schemaNameSet.add(tenantId + "." + schemaName));
+                String tableName = SchemaUtil.getTableNameFromFullName(fullName);
+                splitPoints.add(
+                    SchemaUtil.getTableKey(tenantId, "".equals(schemaName) ? null : schemaName, tableName));
+            }
+        }
+        Collections.sort(splitPoints, Bytes.BYTES_COMPARATOR);
+
+        Admin admin =
+                driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+        assertTrue("Needs at least two split points ", splitPoints.size() > 1);
+        assertTrue(
+            "Number of split points should be less than or equal to the number of region servers ",
+            splitPoints.size() <= NUM_SLAVES_BASE);
+        HBaseTestingUtility util = getUtility();
+        MiniHBaseCluster cluster = util.getHBaseCluster();
+        HMaster master = cluster.getMaster();
+        AssignmentManager am = master.getAssignmentManager();
+        // No need to split on the first splitPoint since the end key of region boundaries are exclusive
+        for (int i = 1; i < splitPoints.size(); ++i) {
+            splitRegion(splitPoints.get(i));
+        }
+        HashMap<ServerName, List<HRegionInfo>> serverToRegionsList = Maps.newHashMapWithExpectedSize(NUM_SLAVES_BASE);
+        Deque<ServerName> availableRegionServers = new ArrayDeque<ServerName>(NUM_SLAVES_BASE);
+        for (int i = 0; i < NUM_SLAVES_BASE; ++i) {
+            availableRegionServers.push(util.getHBaseCluster().getRegionServer(i).getServerName());
+        }
+        List<HRegionInfo> tableRegions =
+                admin.getTableRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+        for (HRegionInfo hRegionInfo : tableRegions) {
+            // filter on regions we are interested in
+            if (regionContainsMetadataRows(hRegionInfo, splitPoints)) {
+                ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo);
+                if (!serverToRegionsList.containsKey(serverName)) {
+                    serverToRegionsList.put(serverName, new ArrayList<HRegionInfo>());
+                }
+                serverToRegionsList.get(serverName).add(hRegionInfo);
+                availableRegionServers.remove(serverName);
+            }
+        }
+        assertTrue("No region servers available to move regions on to ", !availableRegionServers.isEmpty());
+        for (Entry<ServerName, List<HRegionInfo>> entry : serverToRegionsList.entrySet()) {
+            List<HRegionInfo> regions = entry.getValue();
+            if (regions.size() > 1) {
+                for (int i = 1; i < regions.size(); ++i) {
+                    moveRegion(regions.get(i), entry.getKey(), availableRegionServers.pop());
+                }
+            }
+        }
+
+        // verify each region of SYSTEM.CATALOG is on its own region server
+        tableRegions =
+                admin.getTableRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+        Set<ServerName> serverNames = Sets.newHashSet();
+        for (HRegionInfo hRegionInfo : tableRegions) {
+            // filter on regions we are interested in
+            if (regionContainsMetadataRows(hRegionInfo, splitPoints)) {
+                ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo);
+                if (!serverNames.contains(serverName)) {
+                    serverNames.add(serverName);
+                } else {
+                    fail("Multiple regions on " + serverName.getServerName());
+                }
+            }
+        }
+    }
+    
+    /**
+     * Ensures each region of SYSTEM.CATALOG is on a different region server
+     */
+    private static void moveRegion(HRegionInfo regionInfo, ServerName srcServerName, ServerName dstServerName) throws Exception {
+        Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+        HBaseTestingUtility util = getUtility();
+        MiniHBaseCluster cluster = util.getHBaseCluster();
+        HMaster master = cluster.getMaster();
+        AssignmentManager am = master.getAssignmentManager();
+
+        HRegionServer dstServer = util.getHBaseCluster().getRegionServer(dstServerName);
+        HRegionServer srcServer = util.getHBaseCluster().getRegionServer(srcServerName);
+        byte[] encodedRegionNameInBytes = regionInfo.getEncodedNameAsBytes();
+        admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
+        while (dstServer.getOnlineRegion(regionInfo.getRegionName()) == null
+                || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+                || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)) {
+            // wait for the move to be finished
+            Thread.sleep(100);
+        }
+    }
 }
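
Note: a sketch of how an integration test might drive the new splitSystemCatalog helper. The tenant, schema, and table names below are made up for illustration; real callers build the map from the tables and views they create, keeping the tenant+schema combination unique per entry so each split point lands in its own region:

    // Hypothetical usage; names are illustrative only.
    Map<String, List<String>> tenantToTableAndViewMap = Maps.newHashMap();
    tenantToTableAndViewMap.put(null, Lists.newArrayList("SCHEMA1.TABLE1"));      // global table
    tenantToTableAndViewMap.put("tenant1", Lists.newArrayList("SCHEMA2.VIEW1"));  // tenant-owned view
    splitSystemCatalog(tenantToTableAndViewMap);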

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
index 0443b77..485a21f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -62,7 +63,26 @@ public class MetaDataUtilTest {
         assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 5));
     }
 
-  /**
+    @Test
+    public void testMutatingAPut() throws Exception {
+        String version = VersionInfo.getVersion();
+        KeyValueBuilder builder = KeyValueBuilder.get(version);
+        byte[] row = Bytes.toBytes("row");
+        byte[] family = PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+        byte[] qualifier = Bytes.toBytes("qual");
+        byte[] value = Bytes.toBytes("generic-value");
+        KeyValue kv = builder.buildPut(wrap(row), wrap(family), wrap(qualifier), wrap(value));
+        Put put = new Put(row);
+        KeyValueBuilder.addQuietly(put, builder, kv);
+        byte[] newValue = Bytes.toBytes("new-value");
+        Cell cell = put.get(family, qualifier).get(0);
+        assertEquals(Bytes.toString(value), Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+        MetaDataUtil.mutatePutValue(put, family, qualifier, newValue);
+        cell = put.get(family, qualifier).get(0);
+        assertEquals(Bytes.toString(newValue), Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+    }
+    }
+
+    /**
    * Ensure it supports {@link GenericKeyValueBuilder}
    * @throws Exception on failure
    */
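
Note: the new testMutatingAPut pins down the contract of MetaDataUtil.mutatePutValue: replace the value of the (family, qualifier) cell inside a Put while leaving row, family, and qualifier untouched. In raw HBase client terms the asserted behavior is roughly the following (a sketch of the contract, not the actual implementation):

    // Equivalent effect, sketched with standard HBase client calls.
    List<Cell> cells = put.get(family, qualifier);
    Cell old = cells.get(0);
    put.getFamilyCellMap().get(family).remove(old);                  // drop the old cell
    put.addColumn(family, qualifier, old.getTimestamp(), newValue);  // re-add with the new value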

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index cb082f7..788b38b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -134,7 +134,7 @@ public class TestUtil {
     private static final Log LOG = LogFactory.getLog(TestUtil.class);
     
     private static final Long ZERO = new Long(0);
-    public static final String DEFAULT_SCHEMA_NAME = "";
+    public static final String DEFAULT_SCHEMA_NAME = "S";
     public static final String DEFAULT_DATA_TABLE_NAME = "T";
     public static final String DEFAULT_INDEX_TABLE_NAME = "I";
     public static final String DEFAULT_DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "T");
@@ -724,6 +724,22 @@ public class TestUtil {
                 public String getExpressionStr() {
                     return null;
                 }
+
+                @Override
+                public long getTimestamp() {
+                    return HConstants.LATEST_TIMESTAMP;
+                }
+
+                @Override
+                public boolean isDerived() {
+                    return false;
+                }
+
+                @Override
+                public boolean isExcluded() {
+                    return false;
+                }
+
                 @Override
                 public boolean isRowTimestamp() {
                     return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-protocol/src/main/MetaDataService.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto
index 369522c..13d8f1a 100644
--- a/phoenix-protocol/src/main/MetaDataService.proto
+++ b/phoenix-protocol/src/main/MetaDataService.proto
@@ -50,6 +50,8 @@ enum MutationCode {
   AUTO_PARTITION_SEQUENCE_NOT_FOUND = 20;
   CANNOT_COERCE_AUTO_PARTITION_ID = 21;
   TOO_MANY_INDEXES = 22;
+  UNABLE_TO_CREATE_CHILD_LINK = 23;
+  UNABLE_TO_UPDATE_PARENT_TABLE = 24;
 };
 
 message SharedTableState {
@@ -83,6 +85,9 @@ message GetTableRequest {
   required int64 tableTimestamp = 4;
   required int64 clientTimestamp = 5;
   optional int32 clientVersion = 6;    
+  optional bool skipAddingParentColumns = 7;
+  optional bool skipAddingIndexes = 8;
+  optional PTable lockedAncestorTable = 9;
 }
 
 message GetFunctionsRequest {
@@ -125,6 +130,7 @@ message DropTableRequest {
   required string tableType = 2;
   optional bool cascade = 3;
   optional int32 clientVersion = 4;
+  optional bool skipAddingParentColumns = 5;
 }
 
 message DropSchemaRequest {
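
Note: because the GetTableRequest additions (and the DropTableRequest one) are all optional, the RPC stays wire-compatible: an old client never sets them and the server keeps its previous behavior. With the standard protobuf-java code generated from this file, a newer client would opt in along these lines (accessor names follow protoc's usual conventions; the other required fields are elided here):

    GetTableRequest.Builder builder = GetTableRequest.newBuilder();
    // ... existing required fields (names, timestamps) set as before ...
    builder.setSkipAddingParentColumns(true); // new optional field 7
    builder.setSkipAddingIndexes(true);       // new optional field 8
    GetTableRequest request = builder.build();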

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index 16381dd..bc868bc 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -35,12 +35,12 @@ enum PTableType {
 message PColumn {
   required bytes columnNameBytes = 1;
   optional bytes familyNameBytes = 2;
-  required string dataType = 3;
+  optional string dataType = 3;
   optional int32 maxLength = 4;
   optional int32 scale = 5;
   required bool nullable = 6;
   required int32 position = 7;
-  required int32 sortOrder = 8;
+  optional int32 sortOrder = 8;
   optional int32 arraySize = 9;
   optional bytes viewConstant = 10;
   optional bool viewReferenced = 11;
@@ -48,6 +48,8 @@ message PColumn {
   optional bool isRowTimestamp = 13;
   optional bool isDynamic = 14;
   optional bytes columnQualifierBytes = 15;
+  optional int64 timestamp = 16;
+  optional bool derived = 17 [default = false];
 }
 
 message PTableStats {
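
Note: relaxing dataType and sortOrder from required to optional lets a PColumn be serialized without a concrete type, presumably to support the derived/excluded column markers introduced elsewhere in this change (see the new isDerived()/isExcluded() overrides in TestUtil.java above). On the read side, proto2 codegen exposes hasXxx() accessors for optional fields, so consumers can be defensive; a sketch assuming the usual generated API, with the LATEST_TIMESTAMP fallback chosen only for illustration:

    long ts = column.hasTimestamp() ? column.getTimestamp() : HConstants.LATEST_TIMESTAMP;
    boolean derived = column.getDerived(); // [default = false] per the proto
    if (!column.hasDataType()) {
        // no serialized type: treat as an excluded/derived column marker
    }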

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d56fd3c9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0dcb45c..8515d71 100644
--- a/pom.xml
+++ b/pom.xml
@@ -320,6 +320,24 @@
                  <goal>verify</goal>
               </goals>
             </execution>
+            <execution>
+              <id>SplitSystemCatalogTests</id>
+              <configuration>
+                 <encoding>UTF-8</encoding>
+                 <forkCount>${numForkedIT}</forkCount>
+                 <runOrder>alphabetical</runOrder>
+                 <reuseForks>false</reuseForks>
+                 <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/</argLine>
+                 <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+                 <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
+                 <groups>org.apache.phoenix.end2end.SplitSystemCatalogTests</groups>
+                 <shutdown>kill</shutdown>
+              </configuration>
+              <goals>
+                 <goal>integration-test</goal>
+                 <goal>verify</goal>
+              </goals>
+            </execution>
           </executions>
         </plugin>
         <plugin>
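
Note: the <groups> element of the new failsafe execution is a JUnit category, so integration tests opt in to this run by annotation rather than by naming convention. A sketch (the category interface name comes from the <groups> value above; the test class itself is hypothetical):

    import org.junit.experimental.categories.Category;

    // Hypothetical IT that runs under the SplitSystemCatalogTests execution.
    @Category(SplitSystemCatalogTests.class)
    public class ViewMetadataSplitIT extends BaseTest {
        // tests exercising a SYSTEM.CATALOG split across region servers
    }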
