PHOENIX-3955: Ensure KEEP_DELETED_CELLS, REPLICATION_SCOPE, and TTL properties 
stay in sync between the physical data table and index tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb13ffd8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb13ffd8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb13ffd8

Branch: refs/heads/4.x-cdh5.15
Commit: eb13ffd863e1a599fff07990b1e1a76badd6639f
Parents: 8702645
Author: Chinmay Kulkarni <chinmayskulka...@gmail.com>
Authored: Mon Oct 8 05:12:55 2018 +0100
Committer: Pedro Boado <pbo...@apache.org>
Committed: Wed Oct 17 22:50:43 2018 +0100

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    |   5 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |  15 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |  27 +-
 .../phoenix/end2end/PropertiesInSyncIT.java     | 494 +++++++++++++++++++
 .../end2end/QueryDatabaseMetaDataIT.java        |   7 +-
 .../apache/phoenix/end2end/SetPropertyIT.java   |  64 ++-
 .../org/apache/phoenix/end2end/SplitIT.java     |  17 +
 .../org/apache/phoenix/tx/TransactionIT.java    |   4 +-
 .../phoenix/exception/SQLExceptionCode.java     |   6 +-
 .../query/ConnectionQueryServicesImpl.java      | 485 +++++++++++++-----
 .../apache/phoenix/schema/MetaDataClient.java   | 112 +++--
 .../apache/phoenix/schema/TableProperty.java    |   4 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  44 +-
 .../org/apache/phoenix/util/UpgradeUtil.java    | 142 +++++-
 14 files changed, 1187 insertions(+), 239 deletions(-)
----------------------------------------------------------------------
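
For context, the behavior this patch enforces: TTL, KEEP_DELETED_CELLS, and
REPLICATION_SCOPE must be specified at the table level only, and their values are
propagated to every column family and to all index tables (including the shared
view-index table). A minimal illustrative sketch in the style of the new
PropertiesInSyncIT test -- the table/index names and JDBC URL are hypothetical,
not part of the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;

    // Assumes a Phoenix cluster reachable at this URL (hypothetical).
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
        // Synced properties are accepted only at the table level ...
        conn.createStatement().execute(
                "CREATE TABLE T (ID INTEGER NOT NULL PRIMARY KEY, CF1.V1 VARCHAR, CF2.V2 VARCHAR) "
                + "TTL=700, KEEP_DELETED_CELLS=true, REPLICATION_SCOPE=1");
        // ... and indexes inherit them instead of declaring their own.
        conn.createStatement().execute("CREATE INDEX IDX ON T (V1)");
        // Per-column-family or per-index use of these properties is rejected:
        //   "CREATE TABLE T2 (...) CF1.TTL=700"    -> COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY (1050)
        //   "CREATE INDEX IDX2 ON T (V1) TTL=700"  -> CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX (10949)
    }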


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 2cac1a6..7af62b3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -925,7 +925,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
             // set HColumnProperty property when adding a pk column and other key value columns should work
             ddl = "ALTER TABLE "
                     + dataTableFullName
-                    + " ADD k3 DECIMAL PRIMARY KEY, col2 bigint, CF.col3 bigint IN_MEMORY = true, CF.IN_MEMORY=false, CF.REPLICATION_SCOPE = 1";
+                    + " ADD k3 DECIMAL PRIMARY KEY, col2 bigint, CF.col3 bigint IN_MEMORY = true,"
+                    + " CF.IN_MEMORY=false, REPLICATION_SCOPE = 1";
             conn.createStatement().execute(ddl);
             // assert that k3 was added as new pk
             ResultSet rs = conn.getMetaData().getPrimaryKeys("", schemaName, dataTableName);
@@ -946,7 +947,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                 assertEquals(2, columnFamilies.length);
                 assertEquals("0", columnFamilies[0].getNameAsString());
                 assertEquals(true, columnFamilies[0].isInMemory());
-                assertEquals(0, columnFamilies[0].getScope());
+                assertEquals(1, columnFamilies[0].getScope());
                 assertEquals("CF", columnFamilies[1].getNameAsString());
                 assertEquals(false, columnFamilies[1].isInMemory());
                 assertEquals(1, columnFamilies[1].getScope());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index ed3669c..e88dc57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -53,20 +53,20 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
     protected static final String[] GLOBAL_INDEX_DDLS =
             new String[] {
                     "CREATE INDEX %s ON %s (a_integer DESC) INCLUDE (" + "    
A_STRING, "
-                            + "    B_STRING, " + "    A_DATE) %s",
+                            + "    B_STRING, " + "    A_DATE)",
                     "CREATE INDEX %s ON %s (a_integer, a_string) INCLUDE (" + 
"    B_STRING, "
-                            + "    A_DATE) %s",
+                            + "    A_DATE)",
                     "CREATE INDEX %s ON %s (a_integer) INCLUDE (" + "    
A_STRING, "
-                            + "    B_STRING, " + "    A_DATE) %s",
+                            + "    B_STRING, " + "    A_DATE)",
                     NO_INDEX };
     protected static final String[] LOCAL_INDEX_DDLS =
             new String[] {
                     "CREATE LOCAL INDEX %s ON %s (a_integer DESC) INCLUDE (" + 
"    A_STRING, "
-                            + "    B_STRING, " + "    A_DATE) %s",
+                            + "    B_STRING, " + "    A_DATE)",
                     "CREATE LOCAL INDEX %s ON %s (a_integer, a_string) INCLUDE 
(" + "    B_STRING, "
-                            + "    A_DATE) %s",
+                            + "    A_DATE)",
                     "CREATE LOCAL INDEX %s ON %s (a_integer) INCLUDE (" + "    
A_STRING, "
-                            + "    B_STRING, " + "    A_DATE) %s" };
+                            + "    B_STRING, " + "    A_DATE)" };
     protected static String[] INDEX_DDLS;
     static {
         INDEX_DDLS = new String[GLOBAL_INDEX_DDLS.length + LOCAL_INDEX_DDLS.length];
@@ -108,8 +108,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
         this.indexName = generateUniqueName();
         if (idxDdl.length() > 0) {
             this.indexDDL =
-                    String.format(idxDdl, indexName, tableName,
-                        keepDeletedCells ? "KEEP_DELETED_CELLS=true" : "KEEP_DELETED_CELLS=false");
+                    String.format(idxDdl, indexName, tableName);
             Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
             try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
                 conn.createStatement().execute(this.indexDDL);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 491889d..fb6a0ce 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -275,9 +274,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
 
     /**
      * Tests that when: 1) DDL has both pk as well as key value columns 2) Key value columns have
-     * both default and explicit column family names 3) Replication scope specifier has the explicit
-     * column family name. Then: 1)REPLICATION_SCOPE is set. 2)The default column family has
-     * DEFAULT_REPLICATION_SCOPE. 3)The explicit column family has the REPLICATION_SCOPE specified
+     * both default and explicit column family names 3) Block size specifier has the explicit
+     * column family name. Then: 1)BLOCKSIZE is set. 2)The default column family has
+     * DEFAULT_BLOCKSIZE. 3)The explicit column family has the BLOCK_SIZE specified
      * in DDL.
      */
     @Test
@@ -287,7 +286,7 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
                 "create table IF NOT EXISTS  " + tableName + "  (" + " id 
char(1) NOT NULL,"
                         + " col1 integer NOT NULL," + " b.col2 bigint," + " 
col3 bigint, "
                         + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1)"
-                        + " ) b.REPLICATION_SCOPE=1, SALT_BUCKETS = 4";
+                        + " ) b.BLOCKSIZE=50000, SALT_BUCKETS = 4";
         Properties props = new Properties();
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
@@ -296,16 +295,16 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
                 admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
         assertEquals(2, columnFamilies.length);
         assertEquals("0", columnFamilies[0].getNameAsString());
-        assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[0].getScope());
+        assertEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, columnFamilies[0].getBlocksize());
         assertEquals("B", columnFamilies[1].getNameAsString());
-        assertEquals(1, columnFamilies[1].getScope());
+        assertEquals(50000, columnFamilies[1].getBlocksize());
     }
 
     /**
      * Tests that when: 1) DDL has both pk as well as key value columns 2) Key value columns have
-     * explicit column family names 3) Different REPLICATION_SCOPE specifiers for different column
-     * family names. Then: 1)REPLICATION_SCOPE is set. 2)Each explicit column family has the
-     * REPLICATION_SCOPE as specified in DDL.
+     * explicit column family names 3) Different BLOCKSIZE specifiers for different column
+     * family names. Then: 1)BLOCKSIZE is set. 2)Each explicit column family has the
+     * BLOCKSIZE as specified in DDL.
      */
     @Test
     public void testCreateTableColumnFamilyHBaseAttribs5() throws Exception {
@@ -314,7 +313,7 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
                 "create table IF NOT EXISTS  " + tableName + "  (" + " id 
char(1) NOT NULL,"
                         + " col1 integer NOT NULL," + " b.col2 bigint," + " 
c.col3 bigint, "
                         + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1)"
-                        + " ) b.REPLICATION_SCOPE=0, c.REPLICATION_SCOPE=1, 
SALT_BUCKETS = 4";
+                        + " ) b.BLOCKSIZE=50000, c.BLOCKSIZE=60000, 
SALT_BUCKETS = 4";
         Properties props = new Properties();
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
@@ -323,9 +322,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
                 
admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
         assertEquals(2, columnFamilies.length);
         assertEquals("B", columnFamilies[0].getNameAsString());
-        assertEquals(0, columnFamilies[0].getScope());
+        assertEquals(50000, columnFamilies[0].getBlocksize());
         assertEquals("C", columnFamilies[1].getNameAsString());
-        assertEquals(1, columnFamilies[1].getScope());
+        assertEquals(60000, columnFamilies[1].getBlocksize());
     }
 
     /**
@@ -449,7 +448,7 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
         try {
             conn.createStatement().execute(ddl);
         } catch (SQLException sqle) {
-            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL.getErrorCode(),
+            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(),
                 sqle.getErrorCode());
         }
     }
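
Note: the CreateTableIT cases above that used to exercise per-column-family
REPLICATION_SCOPE now exercise BLOCKSIZE instead, since REPLICATION_SCOPE has become
one of the synced, table-level-only properties while BLOCKSIZE may still vary per
family. A hedged sketch of the distinction (hypothetical table names; stmt is a
java.sql.Statement from a Phoenix connection):

    // Still allowed: BLOCKSIZE is not a synced property, so it may differ per column family.
    stmt.execute("CREATE TABLE T3 (ID CHAR(1) NOT NULL PRIMARY KEY, B.COL2 BIGINT, C.COL3 BIGINT) "
            + "B.BLOCKSIZE=50000, C.BLOCKSIZE=60000");
    // Rejected after this change: a synced property scoped to one column family.
    // Throws SQLException with COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY (error code 1050).
    stmt.execute("CREATE TABLE T4 (ID CHAR(1) NOT NULL PRIMARY KEY, B.COL2 BIGINT) "
            + "B.REPLICATION_SCOPE=1");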

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertiesInSyncIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertiesInSyncIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertiesInSyncIT.java
new file mode 100644
index 0000000..db44735
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertiesInSyncIT.java
@@ -0,0 +1,494 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES;
+import static org.apache.phoenix.util.MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES;
+import static org.apache.phoenix.util.MetaDataUtil.VIEW_INDEX_TABLE_PREFIX;
+import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
+
+/**
+ * Test properties that need to be kept in sync amongst all column families and indexes of a table
+ */
+public class PropertiesInSyncIT extends ParallelStatsDisabledIT {
+    private static final String COL_FAM1 = "CF1";
+    private static final String COL_FAM2 = "CF2";
+    private static final String NEW_CF = "NEW_CF";
+    private static final String DUMMY_PROP_VALUE = "dummy";
+    private static final int INITIAL_TTL_VALUE = 700;
+    private static final KeepDeletedCells INITIAL_KEEP_DELETED_CELLS_VALUE = 
KeepDeletedCells.TRUE;
+    private static final int INITIAL_REPLICATION_SCOPE_VALUE = 1;
+    private static final int MODIFIED_TTL_VALUE = INITIAL_TTL_VALUE + 300;
+    private static final KeepDeletedCells MODIFIED_KEEP_DELETED_CELLS_VALUE =
+            (INITIAL_KEEP_DELETED_CELLS_VALUE == KeepDeletedCells.TRUE)
+                    ? KeepDeletedCells.FALSE: KeepDeletedCells.TRUE;
+    private static final int MODIFIED_REPLICATION_SCOPE_VALUE =
+            (INITIAL_REPLICATION_SCOPE_VALUE == 1) ? 0 : 1;
+
+
+    // Test that we disallow specifying synced properties to be set per column 
family
+    // when creating a table
+    @Test
+    public void testDisallowSyncedPropsToBeSetColFamSpecificCreateTable() 
throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = generateUniqueName();
+        for (String propName: SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES) {
+            try {
+                conn.createStatement().execute("create table " + tableName
+                        + " (id INTEGER not null primary key, "
+                        + COL_FAM1 + ".name varchar(10), " + COL_FAM2 + ".flag 
boolean) "
+                        + COL_FAM1 + "." + propName + "=" + DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when setting synced 
property for"
+                        + " a specific column family");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to set synced property for a 
specific column family",
+                        
SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+        }
+        conn.close();
+    }
+
+    // Test that all column families have the same value of synced properties 
when creating a table
+    @Test
+    public void testSyncedPropsAllColFamsCreateTable() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        verifyHBaseColumnFamilyProperties(tableName, conn, false, false);
+        conn.close();
+    }
+
+    // Test that we disallow specifying synced properties to be set when 
creating an index
+    // on a physical table or a view
+    @Test
+    public void testDisallowSyncedPropsToBeSetCreateIndex() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        String localIndexName = tableName + "_LOCAL_IDX";
+        String globalIndexName = tableName + "_GLOBAL_IDX";
+        String viewName = "VIEW_" + tableName;
+        conn.createStatement().execute("create view " + viewName
+                + " (new_col SMALLINT) as select * from " + tableName + " 
where id > 1");
+        for (String propName: SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES) {
+            try {
+                conn.createStatement().execute("create local index " + 
localIndexName
+                        + " on " + tableName + "(name) "
+                        + propName + "=" + DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when setting synced 
property for "
+                        + "a local index");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to set synced property for a local 
index",
+                        
SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+            try {
+                conn.createStatement().execute("create index " + 
globalIndexName
+                        + " on " + tableName + "(flag) "
+                        + propName + "=" + DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when setting synced 
property for"
+                        + " a global index");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to set synced property for a global 
index",
+                        
SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+            try {
+                conn.createStatement().execute("create index view_index"
+                        + " on " + viewName + " (flag)" + propName + "=" + 
DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when setting synced 
property for a view index");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to set synced property for a view 
index",
+                        
SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+        }
+        conn.close();
+    }
+
+    // Test that indexes have the same value of synced properties as their 
base table
+    @Test
+    public void testSyncedPropsBaseTableCreateIndex() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        createIndexTable(conn, tableName, PTable.IndexType.LOCAL);
+        String globalIndexName = createIndexTable(conn, tableName, 
PTable.IndexType.GLOBAL);
+
+        // We pass the base table as the physical HBase table since our check 
includes checking
+        // the local index column family too
+        verifyHBaseColumnFamilyProperties(tableName, conn, false, false);
+        verifyHBaseColumnFamilyProperties(globalIndexName, conn, false, false);
+        conn.close();
+    }
+
+    // Test that the physical view index table has the same value of synced 
properties
+    // as its base table
+    @Test
+    public void testSyncedPropsBaseTableCreateViewIndex() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        String viewIndexName = createIndexTable(conn, tableName, null);
+
+        verifyHBaseColumnFamilyProperties(tableName, conn, false, false);
+        verifyHBaseColumnFamilyProperties(viewIndexName, conn, false, false);
+        conn.close();
+    }
+
+    // Test that we disallow specifying synced properties to be set per column 
family
+    // when altering a table
+    @Test
+    public void testDisallowSyncedPropsToBeSetColFamSpecificAlterTable() 
throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        StringBuilder alterAllSyncedPropsString = new StringBuilder();
+        String modPropString = COL_FAM1 + ".%s=" + DUMMY_PROP_VALUE + ",";
+        for (String propName: SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES) {
+            try {
+                conn.createStatement().execute("alter table " + tableName
+                        + " set " + COL_FAM1 + "." + propName + "=" + 
DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when altering synced 
property for a"
+                        + " specific column family");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to alter synced property for a 
specific column family",
+                        
SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+            alterAllSyncedPropsString.append(String.format(modPropString, 
propName));
+        }
+
+        // Test the same when we try to set all of these properties at once
+        try {
+            conn.createStatement().execute("alter table " + tableName + " set "
+                    + alterAllSyncedPropsString.substring(0, 
alterAllSyncedPropsString.length() - 1));
+            fail("Should fail with SQLException when altering synced 
properties for a"
+                    + " specific column family");
+        } catch (SQLException sqlE) {
+            assertEquals("Should fail to alter synced properties for a 
specific column family",
+                    
SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(),
+                    sqlE.getErrorCode());
+        }
+        conn.close();
+    }
+
+    // Test that any alteration of the synced properties gets propagated to 
all indexes and
+    // the physical view index table
+    @Test
+    public void testAlterSyncedPropsPropagateToAllIndexesAndViewIndex() throws 
Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        Set<String> tablesToCheck = new HashSet<>();
+        tablesToCheck.add(tableName);
+        for (int i=0; i<2; i++) {
+            tablesToCheck.add(createIndexTable(conn, tableName, 
PTable.IndexType.LOCAL));
+            tablesToCheck.add(createIndexTable(conn, tableName, 
PTable.IndexType.GLOBAL));
+        }
+        // Create a view and view index
+        tablesToCheck.add(createIndexTable(conn, tableName, null));
+
+        // Now alter the base table's properties. This should get propagated 
to all index tables
+        conn.createStatement().execute("alter table " + tableName + " set 
TTL=" + MODIFIED_TTL_VALUE
+                + ",KEEP_DELETED_CELLS=" + MODIFIED_KEEP_DELETED_CELLS_VALUE
+                + ",REPLICATION_SCOPE=" + MODIFIED_REPLICATION_SCOPE_VALUE);
+
+        for (String table: tablesToCheck) {
+            verifyHBaseColumnFamilyProperties(table, conn, true, false);
+        }
+
+        // Any indexes created henceforth should have the modified properties
+        String newGlobalIndex = createIndexTable(conn, tableName, 
PTable.IndexType.GLOBAL);
+        String newViewIndex = createIndexTable(conn, tableName, null);
+        verifyHBaseColumnFamilyProperties(newGlobalIndex, conn, true, false);
+        verifyHBaseColumnFamilyProperties(newViewIndex, conn, true, false);
+        conn.close();
+    }
+
+    // Test that if we add a column family to a base table, it gets the synced properties
+    @Test
+    public void testAlterTableAddColumnFamilyGetsSyncedProps() throws 
Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+
+        // Test that we are not allowed to set any property to be kept in 
sync, specific
+        // to the new column family to be added
+        for (String propName: SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES) {
+            try {
+                conn.createStatement().execute(
+                        "alter table " + tableName + " add " + NEW_CF + 
".new_column varchar(2) "
+                                + NEW_CF + "." + propName + "=" + 
DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when altering synced 
property for a"
+                        + " specific column family when adding a column");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to alter synced property for a 
specific"
+                                + " column family when adding a column",
+                        
SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+        }
+
+        // Test that when we add a new column (belonging to a new column 
family) and set any
+        // property that should be in sync, then the property is modified for 
all existing
+        // column families of the base table and its indexes
+        Set<String> tablesToCheck = new HashSet<>();
+        tablesToCheck.add(tableName);
+        for (int i=0; i<2; i++) {
+            tablesToCheck.add(createIndexTable(conn, tableName, 
PTable.IndexType.LOCAL));
+            tablesToCheck.add(createIndexTable(conn, tableName, 
PTable.IndexType.GLOBAL));
+        }
+        // Create a view and view index
+        tablesToCheck.add(createIndexTable(conn, tableName, null));
+
+        // Now add a new column family while simultaneously modifying 
properties to be kept in sync,
+        // as well as a property which does not need to be kept in sync. 
Properties to be kept
+        // in sync should get propagated to all index tables and already 
existing column families
+        conn.createStatement().execute(
+                "alter table " + tableName + " add " + NEW_CF + ".new_column 
varchar(2) "
+                + "KEEP_DELETED_CELLS=" + MODIFIED_KEEP_DELETED_CELLS_VALUE
+                + ",REPLICATION_SCOPE=" + MODIFIED_REPLICATION_SCOPE_VALUE
+                + ",BLOCKSIZE=300000");
+
+        for (String table: tablesToCheck) {
+            verifyHBaseColumnFamilyProperties(table, conn, true, true);
+        }
+        try (Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+            HColumnDescriptor[] columnFamilies =
+                    
admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+            for (HColumnDescriptor cfd: columnFamilies) {
+                if (cfd.getNameAsString().equals(NEW_CF)) {
+                    assertEquals("Newly added column family should have updated property",
+                            300000, cfd.getBlocksize());
+                } else {
+                    assertEquals("Existing column families should have default 
value for property",
+                            HColumnDescriptor.DEFAULT_BLOCKSIZE, 
cfd.getBlocksize());
+                }
+            }
+        }
+        conn.close();
+    }
+
+    // Test that we disallow altering a synced property for a global index 
table
+    @Test
+    public void testDisallowAlterGlobalIndexTable() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+        String tableName = createBaseTableWithProps(conn);
+        String globalIndexName = createIndexTable(conn, tableName, 
PTable.IndexType.GLOBAL);
+        for (String propName: SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES) {
+            try {
+                conn.createStatement().execute("alter table " + 
globalIndexName + " set "
+                + propName + "=" + DUMMY_PROP_VALUE);
+                fail("Should fail with SQLException when altering synced 
property"
+                        + " for a global index");
+            } catch (SQLException sqlE) {
+                assertEquals("Should fail to alter synced property for a 
global index",
+                        
SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX.getErrorCode(),
+                        sqlE.getErrorCode());
+            }
+        }
+        conn.close();
+    }
+
+    // Test the upgrade code path for old client to new phoenix server cases 
in which the client
+    // may have tables which have column families and indexes whose properties 
are out of sync
+    @Test
+    public void testOldClientSyncPropsUpgradePath() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), new 
Properties());
+
+        String baseTableName = createBaseTableWithProps(conn);
+        String baseTableName1 = createBaseTableWithProps(conn);
+        Set<String> createdTables = new HashSet<>();
+        createdTables.add(baseTableName);
+        createdTables.add(baseTableName1);
+        // Create different indexes on the base table
+        for (int i=0; i<2; i++) {
+            createdTables.add(createIndexTable(conn, baseTableName, 
PTable.IndexType.GLOBAL));
+            createdTables.add(createIndexTable(conn, baseTableName, 
PTable.IndexType.LOCAL));
+            createdTables.add(createIndexTable(conn, baseTableName, null));
+            createdTables.add(createIndexTable(conn, baseTableName1, 
PTable.IndexType.GLOBAL));
+            createdTables.add(createIndexTable(conn, baseTableName1, 
PTable.IndexType.LOCAL));
+            createdTables.add(createIndexTable(conn, baseTableName1, null));
+        }
+        for (String t: createdTables) {
+            verifyHBaseColumnFamilyProperties(t, conn, false, false);
+        }
+
+        try (Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+            for (String tableName: createdTables) {
+                final HTableDescriptor tableDescriptor;
+                final HColumnDescriptor defaultCF;
+                if (MetaDataUtil.isViewIndex(tableName)) {
+                    // We won't be able to get the PTable for a view index 
table
+                    tableDescriptor = 
conn.unwrap(PhoenixConnection.class).getQueryServices()
+                            .getTableDescriptor(Bytes.toBytes(tableName));
+                    defaultCF = 
tableDescriptor.getFamily(DEFAULT_COLUMN_FAMILY_BYTES);
+                } else {
+                    PTable table = PhoenixRuntime.getTable(conn, tableName);
+                    tableDescriptor = 
conn.unwrap(PhoenixConnection.class).getQueryServices()
+                            
.getTableDescriptor(table.getPhysicalName().getBytes());
+                    defaultCF = 
tableDescriptor.getFamily(SchemaUtil.getEmptyColumnFamily(table));
+                }
+
+                HTableDescriptor newTableDesc = new 
HTableDescriptor(tableDescriptor);
+                if (tableName.equals(baseTableName) || 
tableName.equals(baseTableName1)) {
+                    for (HColumnDescriptor cfd: 
tableDescriptor.getColumnFamilies()) {
+                        // Modify all column families except the default 
column family
+                        // for the base tables
+                        if (!cfd.equals(defaultCF)) {
+                            HColumnDescriptor newCfd = new 
HColumnDescriptor(cfd);
+                            modifySyncedPropsForCF(newCfd);
+                            newTableDesc.modifyFamily(newCfd);
+                        }
+                    }
+                } else {
+                    for (HColumnDescriptor cfd: 
tableDescriptor.getColumnFamilies()) {
+                        // Modify all column families for other tables
+                        HColumnDescriptor newCfd = new HColumnDescriptor(cfd);
+                        modifySyncedPropsForCF(newCfd);
+                        newTableDesc.modifyFamily(newCfd);
+                    }
+                }
+                admin.modifyTable(newTableDesc.getTableName(), newTableDesc);
+            }
+        }
+        // Now synchronize required properties and verify HBase metadata 
property values
+        syncTableAndIndexProperties(conn.unwrap(PhoenixConnection.class),
+                
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin());
+        for (String t: createdTables) {
+            verifyHBaseColumnFamilyProperties(t, conn, false, false);
+        }
+        conn.close();
+    }
+
+    /**
+     * Helper method to modify the synced properties for a column family 
descriptor
+     * @param cfd The column family descriptor object
+     * @throws SQLException
+     */
+    private void modifySyncedPropsForCF(HColumnDescriptor cfd) throws 
SQLException {
+        cfd.setTimeToLive(MODIFIED_TTL_VALUE);
+        cfd.setKeepDeletedCells(MODIFIED_KEEP_DELETED_CELLS_VALUE);
+        cfd.setScope(MODIFIED_REPLICATION_SCOPE_VALUE);
+    }
+
+    /**
+     * Helper method to create or alter a base table with specific values set 
for properties to be
+     * kept in sync
+     * @param conn Phoenix connection
+     * @return Name of the HBase table created
+     * @throws SQLException
+     */
+    private String createBaseTableWithProps(Connection conn) throws 
SQLException {
+        String tableName = generateUniqueName();
+        conn.createStatement().execute("create table " + tableName
+                + " (id INTEGER not null primary key, type varchar(5), "
+                + COL_FAM1 + ".name varchar(10), " + COL_FAM2 + ".flag 
boolean) "
+                + "TTL=" + INITIAL_TTL_VALUE + ",KEEP_DELETED_CELLS="
+                + INITIAL_KEEP_DELETED_CELLS_VALUE
+                + ",REPLICATION_SCOPE=" + INITIAL_REPLICATION_SCOPE_VALUE);
+        return tableName;
+    }
+
+    /**
+     * Helper method to create an index table on a base table.
+     * @param conn Phoenix connection
+     * @param baseTableName Name of the HBase base table on which to create an 
index
+     * @param indexType LOCAL, GLOBAL or if we pass in null as the indexType,
+     *                 we create a view and an index on that view for the 
given base table
+     * @return The physical HBase table corresponding to the index created
+     * @throws SQLException
+     */
+    private String createIndexTable(Connection conn, String baseTableName,
+            PTable.IndexType indexType) throws SQLException {
+        // Create a view on top of the base table and then an index on that 
view
+        if (indexType == null) {
+            String viewName = "VIEW_" + baseTableName + "_" + 
generateUniqueName();
+            String viewIndexName = VIEW_INDEX_TABLE_PREFIX + baseTableName;
+            conn.createStatement().execute("create view " + viewName
+                    + " (new_col SMALLINT) as select * from " + baseTableName 
+ " where id > 1");
+            conn.createStatement().execute("create index view_index_" + 
generateUniqueName()
+                    + " on " + viewName + " (flag)");
+            return viewIndexName;
+        }
+        switch(indexType) {
+        case LOCAL:
+            String localIndexName = baseTableName + "_LOCAL_" + 
generateUniqueName();
+            conn.createStatement().execute(
+                    "create local index " + localIndexName + " on " + 
baseTableName + "(flag)");
+            return baseTableName;
+        case GLOBAL:
+            String globalIndexName = baseTableName + "_GLOBAL_" + 
generateUniqueName();
+            conn.createStatement()
+                    .execute("create index " + globalIndexName + " on " + 
baseTableName + "(name)");
+            return globalIndexName;
+        }
+        return baseTableName;
+    }
+
+    /**
+     * Helper method to verify HBase column family properties
+     * @param tableName Physical HBase table whose properties are to be 
verified
+     * @param conn Phoenix connection
+     * @param propModified true if we have altered any of the properties to be kept in sync, false otherwise
+     * @param ignoreTTL We cannot modify a table level property when adding a column, so in those cases,
+     *                 ignore the check for TTL modification
+     * @throws Exception
+     */
+    private void verifyHBaseColumnFamilyProperties(String tableName, 
Connection conn, boolean propModified,
+            boolean ignoreTTL) throws Exception {
+        final int expectedTTL = propModified ? 
MODIFIED_TTL_VALUE:INITIAL_TTL_VALUE;
+        final KeepDeletedCells expectedKeepDeletedCells = propModified ?
+                MODIFIED_KEEP_DELETED_CELLS_VALUE: 
INITIAL_KEEP_DELETED_CELLS_VALUE;
+        final int expectedReplicationScope = propModified ?
+                
MODIFIED_REPLICATION_SCOPE_VALUE:INITIAL_REPLICATION_SCOPE_VALUE;
+
+        try (Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+            // Note that this includes the local index column family as well
+            HColumnDescriptor[] columnFamilies =
+                    
admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+            for (HColumnDescriptor cfd: columnFamilies) {
+                if (!ignoreTTL) {
+                    assertEquals("Mismatch in TTL", expectedTTL, 
cfd.getTimeToLive());
+                }
+                assertEquals("Mismatch in KEEP_DELETED_CELLS", 
expectedKeepDeletedCells,
+                        cfd.getKeepDeletedCells());
+                assertEquals("Mismatch in REPLICATION_SCOPE", 
expectedReplicationScope,
+                        cfd.getScope());
+            }
+        }
+    }
+}
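
The last test above exercises the upgrade path: for tables created by older clients
whose column families or index tables have drifted out of sync, the new
UpgradeUtil.syncTableAndIndexProperties(PhoenixConnection, Admin) re-aligns the HBase
metadata. A hedged sketch of invoking it, mirroring the call in the test (the JDBC URL
is hypothetical):

    import java.sql.DriverManager;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.phoenix.jdbc.PhoenixConnection;
    import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;

    try (PhoenixConnection pconn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")
            .unwrap(PhoenixConnection.class);
         Admin admin = pconn.getQueryServices().getAdmin()) {
        // Brings TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE on all column families and
        // index tables back in line with the data table, as verified by the test above.
        syncTableAndIndexProperties(pconn, admin);
    }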

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index cd84068..76d8e19 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -764,24 +764,27 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
             }
             admin.createTable(descriptor);
             createMDTestTable(pconn, tableName,
-                "a." + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.TRUE);
+            "a." + HColumnDescriptor.BLOCKSIZE+ "=" + 50000);
 
             descriptor = admin.getTableDescriptor(htableName);
             assertEquals(3, descriptor.getColumnFamilies().length);
             HColumnDescriptor cdA = descriptor.getFamily(cfA);
-            assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCellsAsEnum());
+            assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCellsAsEnum());
+            assertNotEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, cdA.getBlocksize());
             assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); 
// Overriden using
                                                                               
// WITH
             assertEquals(1, cdA.getMaxVersions());// Overriden using WITH
             HColumnDescriptor cdB = descriptor.getFamily(cfB);
             // Allow KEEP_DELETED_CELLS to be false for VIEW
             assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, 
cdB.getKeepDeletedCellsAsEnum());
+            assertEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, 
cdB.getBlocksize());
             assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); 
// Should keep the
                                                                               
// original value.
             // CF c should stay the same since it's not a Phoenix cf.
             HColumnDescriptor cdC = descriptor.getFamily(cfC);
             assertNotNull("Column family not found", cdC);
             assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, 
cdC.getKeepDeletedCellsAsEnum());
+            assertEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, 
cdC.getBlocksize());
             assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == 
cdC.getDataBlockEncoding());
             
assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
             
assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
index 4b5894c..01d1902 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
+import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_BLOCKSIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -165,7 +166,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
         assertImmutableRows(conn, dataTableFullName, true);
         ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = 
FALSE, VERSIONS = 10";
         conn.createStatement().execute(ddl);
-        ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = 
FALSE, CF1.MIN_VERSIONS = 1, CF2.MIN_VERSIONS = 3, MIN_VERSIONS = 8, 
CF1.KEEP_DELETED_CELLS = true, KEEP_DELETED_CELLS = false";
+        ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = 
FALSE, CF1.MIN_VERSIONS = 1, CF2.MIN_VERSIONS = 3, " +
+                "MIN_VERSIONS = 8, CF1.BLOCKSIZE = 50000, KEEP_DELETED_CELLS = 
false";
         conn.createStatement().execute(ddl);
 
         try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
@@ -176,16 +178,19 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
             assertEquals("0", columnFamilies[0].getNameAsString());
             assertEquals(8, columnFamilies[0].getMinVersions());
             assertEquals(10, columnFamilies[0].getMaxVersions());
+            assertEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, columnFamilies[0].getBlocksize());
             assertEquals(KeepDeletedCells.FALSE, columnFamilies[0].getKeepDeletedCellsAsEnum());
 
             assertEquals("CF1", columnFamilies[1].getNameAsString());
             assertEquals(1, columnFamilies[1].getMinVersions());
             assertEquals(10, columnFamilies[1].getMaxVersions());
-            assertEquals(KeepDeletedCells.TRUE, columnFamilies[1].getKeepDeletedCellsAsEnum());
+            assertEquals(50000, columnFamilies[1].getBlocksize());
+            assertEquals(KeepDeletedCells.FALSE, columnFamilies[1].getKeepDeletedCellsAsEnum());
 
             assertEquals("CF2", columnFamilies[2].getNameAsString());
             assertEquals(3, columnFamilies[2].getMinVersions());
             assertEquals(10, columnFamilies[2].getMaxVersions());
+            assertEquals(HColumnDescriptor.DEFAULT_BLOCKSIZE, columnFamilies[2].getBlocksize());
             assertEquals(KeepDeletedCells.FALSE, columnFamilies[2].getKeepDeletedCellsAsEnum());
 
             assertEquals(Boolean.toString(false), 
tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
@@ -251,7 +256,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
             conn1.createStatement().execute(ddl);
             fail();
         } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL.getErrorCode(), e.getErrorCode());
+            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(), e.getErrorCode());
         }
     }
 
@@ -267,7 +272,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                 +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + 
generateDDLOptions("SALT_BUCKETS = 8");
         Connection conn1 = DriverManager.getConnection(getUrl(), props);
         conn1.createStatement().execute(ddl);
-        ddl = "ALTER TABLE " + dataTableFullName + " SET CF.REPLICATION_SCOPE = 1";
+        ddl = "ALTER TABLE " + dataTableFullName + " SET CF.BLOCKSIZE = 50000";
         try {
             conn1.createStatement().execute(ddl);
             fail();
@@ -384,7 +389,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute(ddl);
         assertImmutableRows(conn, dataTableFullName, true);
         ddl = "ALTER TABLE  " + dataTableFullName
-                + " SET COMPACTION_ENABLED = FALSE, CF.REPLICATION_SCOPE=1, IMMUTABLE_ROWS = TRUE, TTL=1000";
+                + " SET COMPACTION_ENABLED = FALSE, CF.BLOCKSIZE=50000, IMMUTABLE_ROWS = TRUE, TTL=1000";
         conn.createStatement().execute(ddl);
         assertImmutableRows(conn, dataTableFullName, true);
         try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
@@ -392,11 +397,13 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
             HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
             assertEquals(2, columnFamilies.length);
             assertEquals("CF", columnFamilies[0].getNameAsString());
-            assertEquals(1, columnFamilies[0].getScope());
+            assertEquals(DEFAULT_REPLICATION_SCOPE, 
columnFamilies[0].getScope());
             assertEquals(1000, columnFamilies[0].getTimeToLive());
+            assertEquals(50000, columnFamilies[0].getBlocksize());
             assertEquals("XYZ", columnFamilies[1].getNameAsString());
             assertEquals(DEFAULT_REPLICATION_SCOPE, 
columnFamilies[1].getScope());
             assertEquals(1000, columnFamilies[1].getTimeToLive());
+            assertEquals(DEFAULT_BLOCKSIZE, columnFamilies[1].getBlocksize());
             assertEquals(Boolean.toString(false), 
tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
         }
     }
@@ -446,20 +453,21 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                     .execute(
                             "ALTER TABLE "
                                     + dataTableFullName
-                                    + " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false ");
+                                    + " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.BLOCKSIZE=50000, "
+                                    + "CF2.IN_MEMORY=false, REPLICATION_SCOPE=1 ");
             try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
-                HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
-                        .getColumnFamilies();
+                HColumnDescriptor[] columnFamilies = admin
+                        
.getTableDescriptor(Bytes.toBytes(dataTableFullName)).getColumnFamilies();
                 assertEquals(3, columnFamilies.length);
                 assertEquals("0", columnFamilies[0].getNameAsString());
                 assertTrue(columnFamilies[0].isInMemory());
-                assertEquals(0, columnFamilies[0].getScope());
+                assertEquals(1, columnFamilies[0].getScope());
                 assertEquals("CF1", columnFamilies[1].getNameAsString());
                 assertTrue(columnFamilies[1].isInMemory());
                 assertEquals(1, columnFamilies[1].getScope());
                 assertEquals("CF2", columnFamilies[2].getNameAsString());
                 assertFalse(columnFamilies[2].isInMemory());
-                assertEquals(0, columnFamilies[2].getScope());
+                assertEquals(1, columnFamilies[2].getScope());
             }
         } finally {
             conn.close();
@@ -480,17 +488,19 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                     .execute(
                             "ALTER TABLE "
                                     + dataTableFullName
-                                    + " ADD col4 integer, CF1.col5 integer, 
CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false, 
XYZ.REPLICATION_SCOPE=1 ");
-            try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
-                HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
-                        .getColumnFamilies();
+                                    + " ADD col4 integer, CF1.col5 integer, 
CF2.col6 integer IN_MEMORY=true, CF1.BLOCKSIZE=50000, "
+                                    + "CF2.IN_MEMORY=false, 
REPLICATION_SCOPE=1 ");
+                try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+                    HColumnDescriptor[] columnFamilies = admin
+                            
.getTableDescriptor(Bytes.toBytes(dataTableFullName)).getColumnFamilies();
                 assertEquals(3, columnFamilies.length);
                 assertEquals("CF1", columnFamilies[0].getNameAsString());
                 assertTrue(columnFamilies[0].isInMemory());
                 assertEquals(1, columnFamilies[0].getScope());
+                assertEquals(50000, columnFamilies[0].getBlocksize());
                 assertEquals("CF2", columnFamilies[1].getNameAsString());
                 assertFalse(columnFamilies[1].isInMemory());
-                assertEquals(0, columnFamilies[1].getScope());
+                assertEquals(1, columnFamilies[1].getScope());
                 assertEquals("XYZ", columnFamilies[2].getNameAsString());
                 assertTrue(columnFamilies[2].isInMemory());
                 assertEquals(1, columnFamilies[2].getScope());
@@ -513,7 +523,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
             try {
                 conn.createStatement().execute(
                         "ALTER TABLE " + dataTableFullName
-                                + " ADD col4 integer CF1.REPLICATION_SCOPE=1, XYZ.IN_MEMORY=true ");
+                                + " ADD col4 integer CF1.BLOCKSIZE=50000, XYZ.IN_MEMORY=true ");
                 fail();
             } catch(SQLException e) {
                 
assertEquals(SQLExceptionCode.CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED.getErrorCode(),
 e.getErrorCode());
@@ -537,14 +547,16 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                     .execute(
                             "ALTER TABLE "
                                     + dataTableFullName
-                                    + " ADD col4 integer, CF1.col5 integer, 
CF2.col6 integer, CF3.col7 integer CF1.REPLICATION_SCOPE=1, 
CF1.IN_MEMORY=false, IN_MEMORY=true ");
-            try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
-                HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+                                    + " ADD col4 integer, CF1.col5 integer, 
CF2.col6 integer, CF3.col7 integer CF1.BLOCKSIZE=50000,"
+                                    + " CF1.IN_MEMORY=false, IN_MEMORY=true ");
+                try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+                    HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
                         .getColumnFamilies();
                 assertEquals(4, columnFamilies.length);
                 assertEquals("CF1", columnFamilies[0].getNameAsString());
                 assertFalse(columnFamilies[0].isInMemory());
-                assertEquals(1, columnFamilies[0].getScope());
+                assertEquals(0, columnFamilies[0].getScope());
+                assertEquals(50000, columnFamilies[0].getBlocksize());
                 assertEquals("CF2", columnFamilies[1].getNameAsString());
                 assertTrue(columnFamilies[1].isInMemory());
                 assertEquals(0, columnFamilies[1].getScope());
@@ -571,7 +583,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
         try {
             conn.createStatement().execute(ddl);
             conn.createStatement().execute(
-                    "ALTER TABLE " + dataTableFullName + " ADD col4 integer XYZ.REPLICATION_SCOPE=1 ");
+                    "ALTER TABLE " + dataTableFullName + " ADD col4 integer REPLICATION_SCOPE=1, XYZ.BLOCKSIZE=50000");
             conn.createStatement()
                     .execute("ALTER TABLE " + dataTableFullName + " ADD 
XYZ.col5 integer IN_MEMORY=true ");
             try (HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
@@ -580,10 +592,12 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                 assertEquals(2, columnFamilies.length);
                 assertEquals("CF1", columnFamilies[0].getNameAsString());
                 assertFalse(columnFamilies[0].isInMemory());
-                assertEquals(0, columnFamilies[0].getScope());
+                assertEquals(1, columnFamilies[0].getScope());
+                assertEquals(DEFAULT_BLOCKSIZE, 
columnFamilies[0].getBlocksize());
                 assertEquals("XYZ", columnFamilies[1].getNameAsString());
                 assertTrue(columnFamilies[1].isInMemory());
                 assertEquals(1, columnFamilies[1].getScope());
+                assertEquals(50000, columnFamilies[1].getBlocksize());
             }
         } finally {
             conn.close();
@@ -728,7 +742,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
             conn.createStatement().execute(ddl);
             fail();
         } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL.getErrorCode(), e.getErrorCode());
+            assertEquals(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode(), e.getErrorCode());
         } finally {
             conn.close();
         }
@@ -923,7 +937,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
                 assertEquals(1000, columnFamilies[0].getTimeToLive());
                 assertEquals("XYZ", columnFamilies[1].getNameAsString());
                 assertEquals(false, columnFamilies[1].isInMemory());
-                assertEquals(86400, columnFamilies[1].getTimeToLive());
+                assertEquals(1000, columnFamilies[1].getTimeToLive());
             }
 
             // the new column will be assigned to the column family XYZ. With a KV column getting added for XYZ,
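
The 86400 -> 1000 change in the last hunk reflects the sync behavior: once TTL is kept
in sync across all column families of a table, a family added later (XYZ here) picks up
the table's TTL rather than the HBase default of 86400 seconds. A short sketch
(hypothetical names; stmt is a java.sql.Statement on a Phoenix connection):

    stmt.execute("ALTER TABLE T SET TTL=1000");
    stmt.execute("ALTER TABLE T ADD XYZ.COL5 INTEGER");
    // Both the existing column families and the newly created XYZ family now report
    // getTimeToLive() == 1000 in their HColumnDescriptor.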

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
index 73cf1f0..482ad5a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index b1866db..b8eb70b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -327,8 +327,8 @@ public class TransactionIT  extends ParallelStatsDisabledIT {
 
         Connection conn = DriverManager.getConnection(getUrl());
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "1(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR) TTL=1000");
-        conn.createStatement().execute("CREATE INDEX " + idx1 + " ON " + nonTxTableName + "1(a.v, b.v) TTL=1000");
-        conn.createStatement().execute("CREATE INDEX " + idx2 + " ON " + nonTxTableName + "1(c.v) INCLUDE (a.v, b.v) TTL=1000");
+        conn.createStatement().execute("CREATE INDEX " + idx1 + " ON " + nonTxTableName + "1(a.v, b.v)");
+        conn.createStatement().execute("CREATE INDEX " + idx2 + " ON " + nonTxTableName + "1(c.v) INCLUDE (a.v, b.v)");
 
         try {
             conn.createStatement().execute("ALTER TABLE " + nonTxTableName + 
"1 SET TRANSACTIONAL=true," + tableDDLOptions);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index c0a81ec..d84857d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -260,7 +260,9 @@ public enum SQLExceptionCode {
     TOO_MANY_INDEXES(1047, "43A04", "Too many indexes have already been created on the physical table."),
     NO_LOCAL_INDEX_ON_TABLE_WITH_IMMUTABLE_ROWS(1048,"43A05","Local indexes aren't allowed on tables with immutable rows."),
     COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY(1049, "43A06", "Column family not allowed for table properties."),
-    COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL(1050, "43A07", "Setting TTL for a column family not supported. You can only have TTL for the entire table."),
+    COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY(1050, "43A07", "Setting or altering any of the following properties: "
+            + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES.toString()
+            + " for a column family is not supported since they must be kept in sync. You can only set these properties for the entire table."),
     CANNOT_ALTER_PROPERTY(1051, "43A08", "Property can be specified or changed only when creating a table."),
     CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot be specified for a column family that is not being added or modified."),
     CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property cannot be set when adding a column."),
@@ -303,6 +305,8 @@ public enum SQLExceptionCode {
     CANNOT_SWITCH_TXN_PROVIDERS(1096, "44A27", "Cannot switch transaction providers."),
     TTL_UNSUPPORTED_FOR_TXN_TABLE(10947, "44A28", "TTL is not supported for"),
     CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE(10948, "44A29", "Local indexes cannot be created for"),
+    CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX(10949, "44A30", "Cannot set or alter the following properties on an index: "
+            + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_PROPERTIES.toString()),
 
     /** Sequence related */
     SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() {
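
Client-side handling note: error code 1050 / SQLSTATE 43A07 is retained, but its enum
constant and message change from COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL to the broader
COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY, and index DDL that sets a synced property now
fails with the new CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX (10949, 44A30). A hedged
sketch of checking for the former (hypothetical DDL; stmt is a java.sql.Statement):

    try {
        stmt.execute("ALTER TABLE T SET CF1.TTL=1000"); // synced property scoped to one family
    } catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY.getErrorCode()) {
            // 1050 / 43A07: these properties can only be set for the entire table
        }
    }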
