This is an automated email from the ASF dual-hosted git repository.

skadam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 22663ca  PHOENIX-5261: Implement ALTER TABLE ADD COLUMN CASCADE (#864)
22663ca is described below

commit 22663cac7acd38757fa04da42a51bb516977c804
Author: Swaroopa Kadam <swaroopa.kada...@gmail.com>
AuthorDate: Mon Sep 21 14:50:36 2020 -0700

    PHOENIX-5261: Implement ALTER TABLE ADD COLUMN CASCADE (#864)
    
    Co-authored-by: Swaroopa Kadam <s.ka...@apache.org>
---
 .../phoenix/end2end/AlterAddCascadeIndexIT.java    | 363 +++++++++++++++++++++
 phoenix-core/src/main/antlr3/PhoenixSQL.g          |   9 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |   8 +-
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  |   8 +-
 .../apache/phoenix/parse/AddColumnStatement.java   |  12 +-
 .../org/apache/phoenix/parse/ParseNodeFactory.java |   4 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  | 122 ++++++-
 7 files changed, 509 insertions(+), 17 deletions(-)
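
    For readers skimming the patch, a minimal, hypothetical sketch of the DDL shape this commit enables follows; the JDBC URL, table, column, and index names are illustrative placeholders, not part of the commit.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class AddColumnCascadeExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical connection string; a real cluster's ZooKeeper quorum would go here.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Add the column to the table and to every eligible global index in one statement.
                stmt.execute("ALTER TABLE US_POPULATION ADD new_col VARCHAR(64) CASCADE INDEX ALL");
                // Or cascade only to an explicit, comma-separated list of index names.
                stmt.execute("ALTER TABLE MY_SCHEMA.MY_TABLE ADD other_col DOUBLE CASCADE INDEX IDX_ONE, IDX_TWO");
            }
        }
    }

    Plain ADD COLUMN behaves as before when CASCADE is omitted; cascading onto local indexes or adding a new PK column is rejected with the new error codes introduced below.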

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterAddCascadeIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterAddCascadeIndexIT.java
new file mode 100644
index 0000000..80c9f98
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterAddCascadeIndexIT.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@RunWith(Parameterized.class)
+public class AlterAddCascadeIndexIT extends ParallelStatsDisabledIT {
+
+    public static final String SYNTAX_ERROR = "Syntax error";
+    @Rule
+    public ExpectedException exception = ExpectedException.none();
+    private static Connection conn;
+    private Properties prop;
+    private boolean isViewIndex;
+    private String phoenixObjectName;
+    private String indexNames;
+    private final String tableDDLOptions;
+    String fullIndexNameOne, fullIndexNameTwo;
+
+
+    public AlterAddCascadeIndexIT(boolean isViewIndex, boolean mutable) {
+        this.isViewIndex = isViewIndex;
+        StringBuilder optionBuilder = new StringBuilder();
+        if (!mutable) {
+            optionBuilder.append(" IMMUTABLE_ROWS=true");
+        }
+        this.tableDDLOptions = optionBuilder.toString();
+    }
+
+    @Parameters(name="AlterAddCascadeIndexIT_isViewIndex={0},mutable={1}")
+    public static Collection<Object[]> data() {
+        return Arrays.asList(new Object[][] {
+                { true, true},
+                { true, false},
+                { false, true},
+                { false, false},
+        });
+    }
+
+    @Before
+    public void setup() throws SQLException {
+        prop = new Properties();
+        conn = DriverManager.getConnection(getUrl(), prop);
+        conn.setAutoCommit(true);
+        String schemaName = "S_"+generateUniqueName();
+        String indexNameOne = "I_"+generateUniqueName();
+        String indexNameTwo = "I_"+generateUniqueName();
+        String tableName = "T_"+generateUniqueName();
+        String viewName = "V_"+generateUniqueName();
+        String fullViewName = SchemaUtil.getQualifiedTableName(schemaName, viewName);
+        String fullTableName = SchemaUtil.getQualifiedTableName(schemaName, tableName);
+        conn.createStatement().execute("CREATE TABLE IF NOT EXISTS " + fullTableName + " (\n" +
+                "      state CHAR(2) NOT NULL,\n" +
+                "      city VARCHAR NOT NULL,\n" +
+                "      population BIGINT,\n" +
+                "      CONSTRAINT my_pk PRIMARY KEY (state, city)) " + tableDDLOptions);
+
+        if(isViewIndex) {
+            conn.createStatement().execute("CREATE VIEW IF NOT EXISTS " + fullViewName +
+                    " (city_area INTEGER, avg_fam_size INTEGER) AS " +
+                    "SELECT * FROM "+fullTableName+" WHERE state = 'CA'");
+
+            conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + indexNameOne + " ON " +
+                    fullViewName+" (city_area) INCLUDE (population)");
+            conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + indexNameTwo + " ON " +
+                    fullViewName+" (avg_fam_size) INCLUDE (population)");
+            phoenixObjectName = fullViewName;
+        } else {
+            conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + indexNameOne + " ON " +
+                    fullTableName+" (population)");
+            conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + indexNameTwo + " ON " +
+                    fullTableName+" (state, population)");
+            phoenixObjectName = fullTableName;
+        }
+        fullIndexNameOne = SchemaUtil.getQualifiedTableName(schemaName, indexNameOne);
+        fullIndexNameTwo = SchemaUtil.getQualifiedTableName(schemaName, indexNameTwo);
+        indexNames = indexNameOne +", "+indexNameTwo;
+    }
+
+
+    // Test with ALTER TABLE/VIEW CASCADE INDEX ALL with upserting into new column
+    @Test
+    public void testAlterDBOAddCascadeIndexAllUpsert() throws Exception {
+        String query = "ALTER " +(isViewIndex? "VIEW " : "TABLE ") + phoenixObjectName + " ADD new_column_3 VARCHAR(64) CASCADE INDEX ALL";
+        conn.createStatement().execute(query);
+        PreparedStatement ps;
+        if(isViewIndex) {
+            ps = conn.prepareStatement("UPSERT INTO " + phoenixObjectName +
+                    "(state,city,population,city_area,avg_fam_size,new_column_3) " +
+                    "VALUES('CA','Santa Barbara',912332,1300,4,'test_column')");
+        } else {
+            ps = conn.prepareStatement("UPSERT INTO " + phoenixObjectName +
+                    "(state,city,population,new_column_3) " +
+                    "VALUES('CA','Santa Barbara',912332,'test_column')");
+        }
+        ps.executeUpdate();
+        ColumnInfo [] columnArray =  {new ColumnInfo("new_column_3", PVarchar.INSTANCE.getSqlType(), 64)};
+        ColumnInfo [] columnIndexArray =  {new ColumnInfo("0:new_column_3", PVarchar.INSTANCE.getSqlType(), 64)};
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 6, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 5, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 5, columnIndexArray, false);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 4, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 4, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 4, columnIndexArray, false);
+        }
+
+    }
+
+    // Test with CASCADE INDEX <index_name>
+    @Test
+    public void testAlterDBOAddCascadeIndex() throws Exception {
+        ColumnInfo [] columnArray =  {new ColumnInfo("new_column_1", PFloat.INSTANCE.getSqlType())};
+        ColumnInfo [] columnIndexArray =  {new ColumnInfo("0:new_column_1", PDecimal.INSTANCE.getSqlType())};
+
+        String query = "ALTER " + (isViewIndex? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 FLOAT CASCADE INDEX " + indexNames.split(",")[0];
+        conn.createStatement().execute(query);
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 6, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 5, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 4, columnIndexArray, true);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 4, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 4, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 3, columnIndexArray, true);
+        }
+    }
+
+    // Test with CASCADE INDEX <index_name>
+    @Test
+    public void testAlterDBOAddCascadeTwoColsOneIndex() throws Exception {
+        ColumnInfo [] columnArray =  {new ColumnInfo("new_column_1", PFloat.INSTANCE.getSqlType()),
+                new ColumnInfo("new_column_2", PDouble.INSTANCE.getSqlType())};
+        ColumnInfo [] columnIndexArray =  {new ColumnInfo("0:new_column_1", PDecimal.INSTANCE.getSqlType()),
+                new ColumnInfo("0:new_column_2", PDecimal.INSTANCE.getSqlType())};
+        String query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ") + phoenixObjectName
+                + " ADD new_column_1 FLOAT, new_column_2 DOUBLE CASCADE INDEX " + indexNames.split(",")[0];
+        conn.createStatement().execute(query);
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 7, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 6, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 4, columnIndexArray, true);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 5, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 5, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 3, columnIndexArray, true);
+        }
+
+    }
+
+    // Test with CASCADE INDEX <index_name>, <index_name>
+    @Test
+    public void testAlterDBOAddCascadeIndexes() throws Exception {
+        ColumnInfo [] columnArray = {new ColumnInfo("new_column_1", PDouble.INSTANCE.getSqlType())};
+        ColumnInfo [] columnIndexArray = {new ColumnInfo("0:new_column_1", PDecimal.INSTANCE.getSqlType())};
+        String query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 DOUBLE CASCADE INDEX " + indexNames;
+        conn.createStatement().execute(query);
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 6, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 5, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 5, columnIndexArray, false);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 4, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 4, columnIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 4, columnIndexArray, false);
+        }
+    }
+
+    // Test with CASCADE INDEX <index_name>, <index_name>
+    @Test
+    public void testAlterDBOAddCascadeTwoColsTwoIndexes() throws Exception {
+        ColumnInfo [] columnArray =  {new ColumnInfo("new_column_1", PFloat.INSTANCE.getSqlType()),
+                new ColumnInfo("new_column_2", PDouble.INSTANCE.getSqlType())};
+        ColumnInfo [] columIndexArray =  {new ColumnInfo("0:new_column_1", PDecimal.INSTANCE.getSqlType()),
+                new ColumnInfo("0:new_column_2", PDecimal.INSTANCE.getSqlType())};
+
+        String query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 FLOAT, new_column_2 DOUBLE CASCADE INDEX " + indexNames;
+        conn.createStatement().execute(query);
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 7, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 6, columIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 6, columIndexArray, false);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 5, columnArray, false);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 5, columIndexArray, false);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 5, columIndexArray, false);
+        }
+
+    }
+
+    // Exception for invalid grammar
+    @Test
+    public void testAlterDBOException() throws SQLException {
+
+        String query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ") + phoenixObjectName + " ADD new_column VARCHAR ALL";
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SYNTAX_ERROR));
+        }
+
+        query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ") + 
phoenixObjectName
+                + " ADD new_column VARCHAR CASCADE " + 
indexNames.split(",")[0];
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SYNTAX_ERROR));
+        }
+
+        query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ") + 
phoenixObjectName
+                + " ADD new_column VARCHAR INDEX " + indexNames.split(",")[0];
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SYNTAX_ERROR));
+        }
+
+        query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 DOUBLE CASCADE INDEX INCORRECT_NAME";
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SQLExceptionCode.INCORRECT_INDEX_NAME.getMessage()));
+        }
+
+        String localIndex = generateUniqueName();
+        String createLocalIndex = "CREATE LOCAL INDEX " + localIndex + " ON "
+                + phoenixObjectName + "(avg_fam_size) INCLUDE (population)";
+        if(!isViewIndex) {
+            createLocalIndex = "CREATE LOCAL INDEX " + localIndex + " ON "
+                    + phoenixObjectName + "(state, population)";
+
+        }
+        conn.createStatement().execute(createLocalIndex);
+        query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 DOUBLE CASCADE INDEX "+localIndex;
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SQLExceptionCode
+                    .NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX.getMessage()));
+        }
+
+        query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_2 DOUBLE CASCADE INDEX "+localIndex + "," + indexNames;
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SQLExceptionCode
+                    .NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX.getMessage()));
+
+        }
+    }
+
+    // Exception for incorrect index name
+    @Test
+    public void testAlterDBOIncorrectIndexNameCombination() throws Exception {
+        String query = "ALTER " + (isViewIndex ? "VIEW " : "TABLE ")
+                + phoenixObjectName + " ADD new_column_1 DOUBLE CASCADE INDEX INCORRECT_NAME, "+ indexNames;
+        try {
+            conn.createStatement().execute(query);
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains(SQLExceptionCode.INCORRECT_INDEX_NAME.getMessage()));
+        }
+        ColumnInfo [] columnArray =  {new ColumnInfo("new_column_1", PFloat.INSTANCE.getSqlType())};
+        ColumnInfo [] columnIndexArray =  {new ColumnInfo("0:new_column_1", PDecimal.INSTANCE.getSqlType())};
+        if(isViewIndex) {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.VIEW, 5, columnArray, true);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 4, columnIndexArray, true);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 4, columnIndexArray, true);
+        } else {
+            assertDBODefinition(conn, phoenixObjectName, PTableType.TABLE, 3, columnArray, true);
+            assertDBODefinition(conn, fullIndexNameOne, PTableType.INDEX, 3, columnIndexArray, true);
+            assertDBODefinition(conn, fullIndexNameTwo, PTableType.INDEX, 3, columnIndexArray, true);
+        }
+
+    }
+
+    private void assertDBODefinition(Connection conn, String phoenixObjectName, PTableType pTableType, int baseColumnCount,  ColumnInfo [] columnInfo, boolean fail)
+            throws Exception {
+        String schemaName = SchemaUtil.getSchemaNameFromFullName(phoenixObjectName);
+        String tableName = SchemaUtil.getTableNameFromFullName(phoenixObjectName);
+        PreparedStatement p = conn.prepareStatement("SELECT * FROM SYSTEM.CATALOG "
+                + "WHERE TABLE_SCHEM=? AND TABLE_NAME=? AND TABLE_TYPE=?");
+        p.setString(1, schemaName.toUpperCase());
+        p.setString(2, tableName.toUpperCase());
+        p.setString(3, pTableType.getSerializedValue());
+        ResultSet rs = p.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("Mismatch in ColumnCount", baseColumnCount, rs.getInt("COLUMN_COUNT"));
+        p = conn.prepareStatement("SELECT * FROM SYSTEM.CATALOG "
+                + "WHERE TABLE_SCHEM=? AND TABLE_NAME=? AND COLUMN_NAME=? AND DATA_TYPE=?");
+        p.setString(1, schemaName.toUpperCase());
+        p.setString(2, tableName.toUpperCase());
+
+        int iPos = baseColumnCount - columnInfo.length + 1;
+        for(ColumnInfo column: columnInfo) {
+            p.setString(3, column.getDisplayName().toUpperCase());
+            p.setInt(4, column.getSqlType());
+            rs = p.executeQuery();
+            if(!fail) {
+                assertTrue(rs.next());
+                assertEquals(iPos, rs.getInt("ORDINAL_POSITION"));
+                iPos++;
+            } else {
+                assertFalse(rs.next());
+            }
+        }
+        rs.close();
+        p.close();
+    }
+
+}
\ No newline at end of file
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index a526255..2c23114 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -670,8 +670,8 @@ alter_session_node returns [AlterSessionStatement ret]
 // Parse an alter table statement.
 alter_table_node returns [AlterTableStatement ret]
     :   ALTER (TABLE | v=VIEW) t=from_table_name
-        ( (DROP COLUMN (IF ex=EXISTS)? c=column_names) | (ADD (IF NOT ex=EXISTS)? (d=column_defs) (p=fam_properties)?) | (SET (p=fam_properties)) )
-        { PTableType tt = v==null ? (QueryConstants.SYSTEM_SCHEMA_NAME.equals(t.getSchemaName()) ? PTableType.SYSTEM : PTableType.TABLE) : PTableType.VIEW; ret = ( c == null ? factory.addColumn(factory.namedTable(null,t), tt, d, ex!=null, p) : factory.dropColumn(factory.namedTable(null,t), tt, c, ex!=null) ); }
+        ( (DROP COLUMN (IF ex=EXISTS)? c=column_names) | (ADD (IF NOT ex=EXISTS)? (d=column_defs) (p=fam_properties)?) (cas=CASCADE INDEX (list=indexes | all=ALL))? | (SET (p=fam_properties)) )
+        { PTableType tt = v==null ? (QueryConstants.SYSTEM_SCHEMA_NAME.equals(t.getSchemaName()) ? PTableType.SYSTEM : PTableType.TABLE) : PTableType.VIEW; ret = ( c == null ? factory.addColumn(factory.namedTable(null,t), tt, d, ex!=null, p, cas!=null, (all == null ? list : null)) : factory.dropColumn(factory.namedTable(null,t), tt, c, ex!=null) ); }
     ;
 
 update_statistics_node returns [UpdateStatisticsStatement ret]
@@ -698,6 +698,11 @@ column_defs returns [List<ColumnDef> ret]
     :  v = column_def {$ret.add(v);}  (COMMA v = column_def {$ret.add(v);} )*
 ;
 
+indexes returns [List<NamedNode> ret]
+@init{ret = new ArrayList<NamedNode>(); }
+    :  v = index_name {$ret.add(v);}  (COMMA v = index_name {$ret.add(v);} )*
+;
+
 column_def returns [ColumnDef ret]
     :   c=column_name dt=identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (nn=NOT? n=NULL)? (DEFAULT df=expression)? (pk=PRIMARY KEY (order=ASC|order=DESC)? rr=ROW_TIMESTAMP?)?
         { $ret = factory.columnDef(c, dt, ar != null || lsq != null, a == null ? null :  Integer.parseInt( a.getText() ), nn!=null ? Boolean.FALSE : n!=null ? Boolean.TRUE : null, 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index ba81de2..bb4fdfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -523,7 +523,13 @@ public enum SQLExceptionCode {
             "Cannot use a connection with SCN set to upsert data for a table 
with indexes."),
 
     CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS(904, "43M15",
-            "Cannot perform DDL with pending mutations. Commit or rollback mutations before performing DDL");
+            "Cannot perform DDL with pending mutations. Commit or rollback mutations before performing DDL"),
+
+    NOT_SUPPORTED_CASCADE_FEATURE_PK(905, "43M16", "CASCADE INDEX feature is not supported to add new PK column in INDEX"),
+
+    INCORRECT_INDEX_NAME(906, "43M17", "The list contains one or more incorrect index name(s)"),
+
+    NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX(907, "43M18", "CASCADE INDEX feature is not supported for local index");
 
     private final int errorCode;
     private final String sqlState;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index bb437d1..1990148 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -1499,8 +1499,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
 
     private static class ExecutableAddColumnStatement extends AddColumnStatement implements CompilableStatement {
 
-        ExecutableAddColumnStatement(NamedTableNode table, PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props) {
-            super(table, tableType, columnDefs, ifNotExists, props);
+        ExecutableAddColumnStatement(NamedTableNode table, PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props, boolean cascade, List<NamedNode> indexes) {
+            super(table, tableType, columnDefs, ifNotExists, props, cascade, indexes);
         }
 
         @SuppressWarnings("unchecked")
@@ -1641,8 +1641,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
         }
         
         @Override
-        public AddColumnStatement addColumn(NamedTableNode table,  PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props) {
-            return new ExecutableAddColumnStatement(table, tableType, columnDefs, ifNotExists, props);
+        public AddColumnStatement addColumn(NamedTableNode table,  PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props, boolean cascade, List<NamedNode> indexes) {
+            return new ExecutableAddColumnStatement(table, tableType, columnDefs, ifNotExists, props, cascade, indexes);
         }
         
         @Override
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java
index 9ef0b80..32b4ebe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java
@@ -29,12 +29,16 @@ public class AddColumnStatement extends AlterTableStatement {
     private final List<ColumnDef> columnDefs;
     private final boolean ifNotExists;
     private final ListMultimap<String,Pair<String,Object>> props;
-    
-    protected AddColumnStatement(NamedTableNode table, PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props) {
+    private final boolean cascade;
+    private final List<NamedNode> indexes;
+
+    protected AddColumnStatement(NamedTableNode table, PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props, boolean cascade, List<NamedNode> indexes) {
         super(table, tableType);
         this.columnDefs = columnDefs;
         this.props = props == null ? ImmutableListMultimap.<String,Pair<String,Object>>of()  : props;
         this.ifNotExists = ifNotExists;
+        this.cascade = cascade;
+        this.indexes = indexes;
     }
 
     public List<ColumnDef> getColumnDefs() {
@@ -48,4 +52,8 @@ public class AddColumnStatement extends AlterTableStatement {
     public ListMultimap<String,Pair<String,Object>> getProps() {
         return props;
     }
+
+    public boolean isCascade() { return cascade; }
+
+    public List<NamedNode> getIndexes() { return indexes; }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 36a9c2d..af797da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -357,8 +357,8 @@ public class ParseNodeFactory {
         return new SequenceValueParseNode(tableName, SequenceValueParseNode.Op.NEXT_VALUE, numToAllocateNode);
     }
 
-    public AddColumnStatement addColumn(NamedTableNode table,  PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props) {
-        return new AddColumnStatement(table, tableType, columnDefs, ifNotExists, props);
+    public AddColumnStatement addColumn(NamedTableNode table,  PTableType tableType, List<ColumnDef> columnDefs, boolean ifNotExists, ListMultimap<String,Pair<String,Object>> props, boolean cascade, List<NamedNode>indexes) {
+        return new AddColumnStatement(table, tableType, columnDefs, ifNotExists, props, cascade, indexes);
     }
 
     public DropColumnStatement dropColumn(NamedTableNode table,  PTableType tableType, List<ColumnName> columnNodes, boolean ifExists) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 68adde2..24f428b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -158,6 +158,7 @@ import org.apache.hadoop.hbase.security.access.AccessControlClient;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.IndexExpressionCompiler;
@@ -208,6 +209,7 @@ import org.apache.phoenix.parse.DropSchemaStatement;
 import org.apache.phoenix.parse.DropSequenceStatement;
 import org.apache.phoenix.parse.DropTableStatement;
 import org.apache.phoenix.parse.IndexKeyConstraint;
+import org.apache.phoenix.parse.NamedNode;
 import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.OpenStatement;
 import org.apache.phoenix.parse.PFunction;
@@ -508,7 +510,6 @@ public class MetaDataClient {
 
     public static final String EMPTY_TABLE = " ";
 
-
     private final PhoenixConnection connection;
 
     public MetaDataClient(PhoenixConnection connection) {
@@ -1098,7 +1099,7 @@ public class MetaDataClient {
                 }
                 // if there are new columns to add
                return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(),
-                        true, NamedTableNode.create(statement.getTableName()), statement.getTableType());
+                        true, NamedTableNode.create(statement.getTableName()), statement.getTableType(), false, null);
             }
         }
        table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewIndexIdType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps);
@@ -3632,14 +3633,25 @@ public class MetaDataClient {
 
     public MutationState addColumn(AddColumnStatement statement) throws SQLException {
         PTable table = FromCompiler.getResolver(statement, connection).getTables().get(0).getTable();
-        return addColumn(table, statement.getColumnDefs(), statement.getProps(), statement.ifNotExists(), false, statement.getTable(), statement.getTableType());
+        return addColumn(table, statement.getColumnDefs(), statement.getProps(), statement.ifNotExists(), false, statement.getTable(), statement.getTableType(), statement.isCascade(), statement.getIndexes());
     }
 
     public MutationState addColumn(PTable table, List<ColumnDef> origColumnDefs,
             ListMultimap<String, Pair<String, Object>> stmtProperties, boolean ifNotExists,
-            boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType)
+            boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType, boolean cascade, List<NamedNode> indexes)
                     throws SQLException {
         connection.rollback();
+        List<PTable> indexesPTable = Lists.newArrayListWithExpectedSize(indexes != null ?
+                indexes.size() : table.getIndexes().size());
+        Map<PTable, Integer> indexToColumnSizeMap = new HashMap<>();
+
+        // if cascade keyword is passed and indexes are provided either implicitly or explicitly
+        if (cascade && (indexes == null || !indexes.isEmpty())) {
+            indexesPTable = getIndexesPTableForCascade(indexes, table);
+            for(PTable index : indexesPTable) {
+                indexToColumnSizeMap.put(index, index.getColumns().size());
+            }
+        }
         boolean wasAutoCommit = connection.getAutoCommit();
        List<PColumn> columns = Lists.newArrayListWithExpectedSize(origColumnDefs != null ?
             origColumnDefs.size() : 0);
@@ -3763,10 +3775,10 @@ public class MetaDataClient {
                             if (!colDef.validateDefault(context, null)) {
                                colDef = new ColumnDef(colDef, null); // Remove DEFAULT as it's not necessary
                             }
+                            String familyName = null;
                             Integer encodedCQ = null;
                             if (!colDef.isPK()) {
                                String colDefFamily = colDef.getColumnDefName().getFamilyName();
-                                String familyName = null;
                                ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme();
                                String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) ? 
                                        tableForCQCounters.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY;
@@ -3794,6 +3806,12 @@ public class MetaDataClient {
                                .setTableName(tableName).build().buildException();
                             }
                            PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, columnQualifierBytes, willBeImmutableRows);
+                            HashMap<PTable, PColumn> indexToIndexColumnMap = null;
+                            if (cascade) {
+                                indexToIndexColumnMap = getPTablePColumnHashMapForCascade(indexesPTable, willBeImmutableRows,
+                                                colDef, familyName, indexToColumnSizeMap);
+                            }
+
                             columns.add(column);
                             String pkName = null;
                             Short keySeq = null;
@@ -3808,6 +3826,13 @@ public class MetaDataClient {
                             }
                            colFamiliesForPColumnsToBeAdded.add(column.getFamilyName() == null ? null : column.getFamilyName().getString());
                            addColumnMutation(schemaName, tableName, column, colUpsert, null, pkName, keySeq, table.getBucketNum() != null);
+                            // add new columns for given indexes one by one
+                            if (cascade) {
+                                for (PTable index: indexesPTable) {
+                                    LOGGER.info("Adding column "+column.getName().getString()+" to "+index.getTableName().toString());
+                                    addColumnMutation(schemaName, index.getTableName().getString(), indexToIndexColumnMap.get(index), colUpsert, null, "", keySeq, index.getBucketNum() != null);
+                                }
+                            }
                         }
                         
                         // Add any new PK columns to end of index PK
@@ -3873,7 +3898,18 @@ public class MetaDataClient {
                    tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                     connection.rollback();
                 }
-                
+
+                if (cascade) {
+                    for (PTable index : indexesPTable) {
+                        incrementTableSeqNum(index, index.getType(), columnDefs.size(),
+                                Boolean.FALSE,
+                                metaPropertiesEvaluated.getUpdateCacheFrequency(),
+                                metaPropertiesEvaluated.getPhoenixTTL());
+                    }
+                    tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
+                    connection.rollback();
+                }
+
                 if (changingPhoenixTableProperty || columnDefs.size() > 0) {
                    incrementTableSeqNum(table, tableType, columnDefs.size(), metaPropertiesEvaluated);
 
@@ -4050,6 +4086,80 @@ public class MetaDataClient {
         }
     }
 
+    private List<PTable> getIndexesPTableForCascade(List<NamedNode> indexes, PTable table) throws SQLException {
+        boolean isView = table.getType().equals(PTableType.VIEW);
+        List<PTable> indexesPTable = new ArrayList<>();
+
+        // when indexes is null, that means ALL keyword is passed and
+        // we'll collect all global indexes for cascading
+        if(indexes == null) {
+            indexesPTable.addAll(table.getIndexes());
+            for (PTable index : table.getIndexes()) {
+                // a child view has access to its parent's indexes,
+                // this if clause ensures we only get the indexes that
+                // are only created on the view itself.
+                if (index.getIndexType().equals(IndexType.LOCAL)
+                        || (isView && index.getTableName().toString().contains("#"))) {
+                    indexesPTable.remove(index);
+                }
+            }
+        } else {
+            List<String> indexesParam = Lists.newArrayListWithExpectedSize(indexes.size());
+            for (NamedNode index : indexes) {
+                indexesParam.add(index.getName());
+            }
+            // gets the PTable for list of indexes passed in the function
+            // if all the names in parameter list are correct, indexesParam 
list should be empty
+            // by end of the loop
+            for (PTable index : table.getIndexes()) {
+                if(index.getIndexType().equals(IndexType.LOCAL)) {
+                    throw new SQLExceptionInfo
+                            
.Builder(SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX)
+                            .setTableName(index.getName().getString())
+                            .build()
+                            .buildException();
+                }
+                if (indexesParam.remove(index.getTableName().getString())) {
+                    indexesPTable.add(index);
+                }
+            }
+            // indexesParam has index names that are not correct
+            if (!indexesParam.isEmpty()) {
+                throw new SQLExceptionInfo
+                        .Builder(SQLExceptionCode.INCORRECT_INDEX_NAME)
+                        .setTableName(StringUtils.join(",", indexesParam))
+                        .build()
+                        .buildException();
+            }
+        }
+        return indexesPTable;
+    }
+
+    private HashMap<PTable, PColumn> getPTablePColumnHashMapForCascade(List<PTable> indexesPTable,
+            boolean willBeImmutableRows, ColumnDef colDef, String familyName, Map<PTable, Integer> indexToColumnSizeMap) throws SQLException {
+        HashMap<PTable, PColumn> indexColumn;
+        if (colDef.isPK()) {
+            //only supported for non pk column
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_PK)
+                    .build()
+                    .buildException();
+        }
+        indexColumn = new HashMap(indexesPTable.size());
+        PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
+        ColumnName
+                indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(familyName, colDef.getColumnDefName().getColumnName()));
+        ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), false, colDef.getSortOrder(), colDef.getExpression(), colDef.isRowTimestamp());
+        // TODO: add support to specify tenant owned indexes in the DDL statement with CASCADE executed with Global connection
+        for (PTable index : indexesPTable) {
+            int iPos = indexToColumnSizeMap.get(index);
+            PColumn iColumn = newColumn(iPos, indexColDef, null, "", false, null, willBeImmutableRows);
+            indexColumn.put(index, iColumn);
+            indexToColumnSizeMap.put(index, iPos+1);
+        }
+        return indexColumn;
+    }
+
     private void deleteMutexCells(String physicalSchemaName, String physicalTableName, Set<String> acquiredColumnMutexSet) throws SQLException {
         if (!acquiredColumnMutexSet.isEmpty()) {
             for (String columnName : acquiredColumnMutexSet) {
