[
https://issues.apache.org/jira/browse/PHOENIX-6649?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17498304#comment-17498304
]
ASF GitHub Bot commented on PHOENIX-6649:
-----------------------------------------
gjacoby126 commented on a change in pull request #1397:
URL: https://github.com/apache/phoenix/pull/1397#discussion_r815118143
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java
##########
@@ -0,0 +1,116 @@
+package org.apache.phoenix.mapreduce.transform;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.phoenix.compile.MutationPlan;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.ServerBuildTransformingTableCompiler;
+import org.apache.phoenix.coprocessor.TableInfo;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.PhoenixInputFormat;
+import org.apache.phoenix.mapreduce.PhoenixServerBuildIndexInputFormat;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.ViewInfoWritable;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.transform.Transform;
+import org.apache.phoenix.thirdparty.com.google.common.base.Strings;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
+import org.apache.phoenix.util.ViewUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES;
+import static
org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName;
+
+public class PhoenixTransformWithViewsInputFormat<T extends DBWritable>
extends PhoenixServerBuildIndexInputFormat {
+ private static final Logger LOGGER =
+
LoggerFactory.getLogger(PhoenixTransformWithViewsInputFormat.class);
+ @Override
+ public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
+ final Configuration configuration = context.getConfiguration();
+ try (PhoenixConnection connection = (PhoenixConnection)
+ ConnectionUtil.getInputConnection(configuration)) {
+ try (Table hTable =
connection.unwrap(PhoenixConnection.class).getQueryServices().getTable(
+
SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES,
configuration).toBytes())) {
+ String oldDataTableFullName =
PhoenixConfigurationUtil.getIndexToolDataTableName(configuration);
+ String newDataTableFullName =
getIndexToolIndexTableName(configuration);
+ PTable newDataTable =
PhoenixRuntime.getTableNoCache(connection, newDataTableFullName);
+ String schemaName =
SchemaUtil.getSchemaNameFromFullName(oldDataTableFullName);
+ String tableName =
SchemaUtil.getTableNameFromFullName(oldDataTableFullName);
+ byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ?
null : schemaName.getBytes();
+ Pair<List<PTable>, List<TableInfo>> allDescendantViews =
ViewUtil.findAllDescendantViews(hTable, configuration, null, schemaNameBytes,
+ tableName.getBytes(),
EnvironmentEdgeManager.currentTimeMillis(), false);
+ List<PTable> legitimateDecendants =
allDescendantViews.getFirst();
+
+ List<InputSplit> inputSplits = new ArrayList<>();
+
+ HashMap<String, PColumn> columnMap = new HashMap<>();
+ for (PColumn column : newDataTable.getColumns()) {
+ columnMap.put(column.getName().getString(), column);
+ }
+
+ for (PTable decendant : legitimateDecendants) {
+ if (decendant.getViewType() == PTable.ViewType.READ_ONLY) {
+ continue;
+ }
+ PTable newView = Transform.getTransformedView(decendant,
newDataTable, columnMap, true);
+ QueryPlan queryPlan = getQueryPlan(newView, decendant,
connection);
+ inputSplits.addAll(generateSplits(queryPlan,
configuration));
+ }
Review comment:
What happens if the views are not disjoint? Do we just harmlessly
transform the same data multiple times into the same shape? Seems like we
should have a test for that if we don't already.
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java
##########
@@ -0,0 +1,116 @@
+package org.apache.phoenix.mapreduce.transform;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.phoenix.compile.MutationPlan;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.ServerBuildTransformingTableCompiler;
+import org.apache.phoenix.coprocessor.TableInfo;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.PhoenixInputFormat;
+import org.apache.phoenix.mapreduce.PhoenixServerBuildIndexInputFormat;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.ViewInfoWritable;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.transform.Transform;
+import org.apache.phoenix.thirdparty.com.google.common.base.Strings;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
+import org.apache.phoenix.util.ViewUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES;
+import static
org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName;
+
+public class PhoenixTransformWithViewsInputFormat<T extends DBWritable>
extends PhoenixServerBuildIndexInputFormat {
+ private static final Logger LOGGER =
+
LoggerFactory.getLogger(PhoenixTransformWithViewsInputFormat.class);
+ @Override
+ public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
+ final Configuration configuration = context.getConfiguration();
+ try (PhoenixConnection connection = (PhoenixConnection)
+ ConnectionUtil.getInputConnection(configuration)) {
+ try (Table hTable =
connection.unwrap(PhoenixConnection.class).getQueryServices().getTable(
+
SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES,
configuration).toBytes())) {
+ String oldDataTableFullName =
PhoenixConfigurationUtil.getIndexToolDataTableName(configuration);
+ String newDataTableFullName =
getIndexToolIndexTableName(configuration);
+ PTable newDataTable =
PhoenixRuntime.getTableNoCache(connection, newDataTableFullName);
+ String schemaName =
SchemaUtil.getSchemaNameFromFullName(oldDataTableFullName);
+ String tableName =
SchemaUtil.getTableNameFromFullName(oldDataTableFullName);
+ byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ?
null : schemaName.getBytes();
+ Pair<List<PTable>, List<TableInfo>> allDescendantViews =
ViewUtil.findAllDescendantViews(hTable, configuration, null, schemaNameBytes,
+ tableName.getBytes(),
EnvironmentEdgeManager.currentTimeMillis(), false);
+ List<PTable> legitimateDecendants =
allDescendantViews.getFirst();
+
+ List<InputSplit> inputSplits = new ArrayList<>();
+
+ HashMap<String, PColumn> columnMap = new HashMap<>();
+ for (PColumn column : newDataTable.getColumns()) {
+ columnMap.put(column.getName().getString(), column);
+ }
+
+ for (PTable decendant : legitimateDecendants) {
+ if (decendant.getViewType() == PTable.ViewType.READ_ONLY) {
+ continue;
+ }
+ PTable newView = Transform.getTransformedView(decendant,
newDataTable, columnMap, true);
+ QueryPlan queryPlan = getQueryPlan(newView, decendant,
connection);
+ inputSplits.addAll(generateSplits(queryPlan,
configuration));
Review comment:
Does the use of the setMultiInputMapperSplitSize in the TransformTool
mean that multiple splits can be worked by the same mapper? (If so that's very
good, because we can have lots of small views.) Either way, there's probably
lots of future optimization work based on the view size, but we need better
stats to be able to do that sort of thing so we can defer for now.
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformToolIT.java
##########
@@ -948,6 +959,175 @@ public void testTransformVerify_ForceCutover() throws
Exception {
}
}
+ @Test
+ public void testTransformForGlobalViews() throws Exception {
+ String schemaName = generateUniqueName();
+ String dataTableName = generateUniqueName();
+ String dataTableFullName = SchemaUtil.getTableName(schemaName,
dataTableName);
+ String view1Name = "VW1_" + generateUniqueName();
+ String view2Name = "VW2_" + generateUniqueName();
+ String upsertQuery = "UPSERT INTO %s VALUES(?, ?, ?, ?, ?, ?)";
+
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ int numOfRows = 0;
+ createTableAndUpsertRows(conn, dataTableFullName, numOfRows,
tableDDLOptions);
+ SingleCellIndexIT.assertMetadata(conn,
PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN,
PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, dataTableFullName);
+
+ String createViewSql = "CREATE VIEW " + view1Name + " ( VIEW_COL11
INTEGER, VIEW_COL12 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=1";
+ conn.createStatement().execute(createViewSql);
+
+ createViewSql = "CREATE VIEW " + view2Name + " ( VIEW_COL21
INTEGER, VIEW_COL22 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=11";
+ conn.createStatement().execute(createViewSql);
+
+ PreparedStatement stmt1 =
conn.prepareStatement(String.format(upsertQuery, view1Name));
+ stmt1.setInt(1, 1);
+ stmt1.setString(2, "uname1");
+ stmt1.setInt(3, 95051);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 101);
+ stmt1.setString(6, "viewCol12");
+ stmt1.executeUpdate();
+ conn.commit();
+
+ stmt1 = conn.prepareStatement(String.format(upsertQuery,
view2Name));
+ stmt1.setInt(1, 11);
+ stmt1.setString(2, "uname11");
+ stmt1.setInt(3, 950511);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 111);
+ stmt1.setString(6, "viewCol22");
+ stmt1.executeUpdate();
+ conn.commit();
+
+ conn.createStatement().execute("ALTER TABLE " + dataTableFullName +
+ " SET
IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS,
COLUMN_ENCODED_BYTES=2");
+ SystemTransformRecord record =
Transform.getTransformRecord(schemaName, dataTableName, null, null,
conn.unwrap(PhoenixConnection.class));
+ assertNotNull(record);
+ assertMetadata(conn,
PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS,
PTable.QualifierEncodingScheme.TWO_BYTE_QUALIFIERS,
record.getNewPhysicalTableName());
+
+ List<String> args = getArgList(schemaName, dataTableName, null,
+ null, null, null, false, false, false, false, false);
+ runTransformTool(args.toArray(new String[0]), 0);
+ Transform.doCutover(conn.unwrap(PhoenixConnection.class), record);
+
Transform.updateTransformRecord(conn.unwrap(PhoenixConnection.class), record,
PTable.TransformStatus.COMPLETED);
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ admin.disableTable(TableName.valueOf(dataTableFullName));
+ admin.truncateTable(TableName.valueOf(dataTableFullName),
true);
+ }
+
+ String sql = "SELECT VIEW_COL11, VIEW_COL12 FROM %s ";
+ ResultSet rs1 =
conn.createStatement().executeQuery(String.format(sql, view1Name));
+ assertTrue(rs1.next());
+ assertEquals(101, rs1.getInt(1));
+ assertEquals("viewCol12", rs1.getString(2));
+
+ sql = "SELECT VIEW_COL21, VIEW_COL22 FROM %s ";
+ rs1 = conn.createStatement().executeQuery(String.format(sql,
view2Name));
+ assertTrue(rs1.next());
+ assertEquals(111, rs1.getInt(1));
+ assertEquals("viewCol22", rs1.getString(2));
+ }
+ }
+
+ @Test
+ public void testTransformForTenantViews() throws Exception {
+ String schemaName = generateUniqueName();
+ String dataTableName = generateUniqueName();
+ String dataTableFullName = SchemaUtil.getTableName(schemaName,
dataTableName);
+ String view1Name = "VW1_" + generateUniqueName();
+ String view2Name = "VW2_" + generateUniqueName();
+ String upsertQuery = "UPSERT INTO %s VALUES(?, ?, ?, ?, ?, ?)";
+
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ int numOfRows = 0;
+ createTableAndUpsertRows(conn, dataTableFullName, numOfRows,
tableDDLOptions);
+ SingleCellIndexIT.assertMetadata(conn,
PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN,
PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, dataTableFullName);
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ String createViewSql = "CREATE VIEW " + view1Name + " ( VIEW_COL11
INTEGER, VIEW_COL12 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=1";
+ tenantConn1.createStatement().execute(createViewSql);
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ String createViewSql = "CREATE VIEW " + view2Name + " ( VIEW_COL21
INTEGER, VIEW_COL22 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=11";
+ tenantConn2.createStatement().execute(createViewSql);
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ PreparedStatement stmt1 =
tenantConn1.prepareStatement(String.format(upsertQuery, view1Name));
+ stmt1.setInt(1, 1);
+ stmt1.setString(2, "uname1");
+ stmt1.setInt(3, 95051);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 101);
+ stmt1.setString(6, "viewCol12");
+ stmt1.executeUpdate();
+ tenantConn1.commit();
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ PreparedStatement stmt1 =
tenantConn2.prepareStatement(String.format(upsertQuery, view2Name));
+ stmt1.setInt(1, 11);
+ stmt1.setString(2, "uname11");
+ stmt1.setInt(3, 950511);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 111);
+ stmt1.setString(6, "viewCol22");
+ stmt1.executeUpdate();
+ tenantConn2.commit();
+ }
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute("ALTER TABLE " + dataTableFullName +
+ " SET
IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS,
COLUMN_ENCODED_BYTES=2");
+ SystemTransformRecord record =
Transform.getTransformRecord(schemaName, dataTableName, null, null,
conn.unwrap(PhoenixConnection.class));
+ assertNotNull(record);
+ assertMetadata(conn,
PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS,
PTable.QualifierEncodingScheme.TWO_BYTE_QUALIFIERS,
record.getNewPhysicalTableName());
+
+ List<String> args = getArgList(schemaName, dataTableName, null,
+ null, null, null, false, false, false, false, false);
+ runTransformTool(args.toArray(new String[0]), 0);
+ Transform.doCutover(conn.unwrap(PhoenixConnection.class), record);
+
Transform.updateTransformRecord(conn.unwrap(PhoenixConnection.class), record,
PTable.TransformStatus.COMPLETED);
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ admin.disableTable(TableName.valueOf(dataTableFullName));
+ admin.truncateTable(TableName.valueOf(dataTableFullName),
true);
+ }
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ String sql = "SELECT VIEW_COL11, VIEW_COL12 FROM %s ";
+ ResultSet rs1 =
tenantConn1.createStatement().executeQuery(String.format(sql, view1Name));
+ assertTrue(rs1.next());
+ assertEquals(101, rs1.getInt(1));
+ assertEquals("viewCol12", rs1.getString(2));
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ String sql = "SELECT VIEW_COL21, VIEW_COL22 FROM %s ";
+ ResultSet rs1 =
tenantConn2.createStatement().executeQuery(String.format(sql, view2Name));
+ assertTrue(rs1.next());
+ assertEquals(111, rs1.getInt(1));
+ assertEquals("viewCol22", rs1.getString(2));
+ }
+ }
+
+
+ public static Connection getTenantConnection(String tenant) throws
SQLException {
Review comment:
Could we please get a test for overlapping but still updatable views?
(For example, one view is WHERE Foo = 'a' AND Bar = 'b', and another is just
WHERE Foo = 'a'.)
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformToolIT.java
##########
@@ -948,6 +959,175 @@ public void testTransformVerify_ForceCutover() throws
Exception {
}
}
+ @Test
+ public void testTransformForGlobalViews() throws Exception {
+ String schemaName = generateUniqueName();
+ String dataTableName = generateUniqueName();
+ String dataTableFullName = SchemaUtil.getTableName(schemaName,
dataTableName);
+ String view1Name = "VW1_" + generateUniqueName();
+ String view2Name = "VW2_" + generateUniqueName();
+ String upsertQuery = "UPSERT INTO %s VALUES(?, ?, ?, ?, ?, ?)";
+
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ int numOfRows = 0;
+ createTableAndUpsertRows(conn, dataTableFullName, numOfRows,
tableDDLOptions);
+ SingleCellIndexIT.assertMetadata(conn,
PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN,
PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, dataTableFullName);
+
+ String createViewSql = "CREATE VIEW " + view1Name + " ( VIEW_COL11
INTEGER, VIEW_COL12 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=1";
+ conn.createStatement().execute(createViewSql);
+
+ createViewSql = "CREATE VIEW " + view2Name + " ( VIEW_COL21
INTEGER, VIEW_COL22 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=11";
+ conn.createStatement().execute(createViewSql);
+
+ PreparedStatement stmt1 =
conn.prepareStatement(String.format(upsertQuery, view1Name));
+ stmt1.setInt(1, 1);
+ stmt1.setString(2, "uname1");
+ stmt1.setInt(3, 95051);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 101);
+ stmt1.setString(6, "viewCol12");
+ stmt1.executeUpdate();
+ conn.commit();
+
+ stmt1 = conn.prepareStatement(String.format(upsertQuery,
view2Name));
+ stmt1.setInt(1, 11);
+ stmt1.setString(2, "uname11");
+ stmt1.setInt(3, 950511);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 111);
+ stmt1.setString(6, "viewCol22");
+ stmt1.executeUpdate();
+ conn.commit();
+
+ conn.createStatement().execute("ALTER TABLE " + dataTableFullName +
+ " SET
IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS,
COLUMN_ENCODED_BYTES=2");
+ SystemTransformRecord record =
Transform.getTransformRecord(schemaName, dataTableName, null, null,
conn.unwrap(PhoenixConnection.class));
+ assertNotNull(record);
+ assertMetadata(conn,
PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS,
PTable.QualifierEncodingScheme.TWO_BYTE_QUALIFIERS,
record.getNewPhysicalTableName());
+
+ List<String> args = getArgList(schemaName, dataTableName, null,
+ null, null, null, false, false, false, false, false);
+ runTransformTool(args.toArray(new String[0]), 0);
+ Transform.doCutover(conn.unwrap(PhoenixConnection.class), record);
+
Transform.updateTransformRecord(conn.unwrap(PhoenixConnection.class), record,
PTable.TransformStatus.COMPLETED);
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ admin.disableTable(TableName.valueOf(dataTableFullName));
+ admin.truncateTable(TableName.valueOf(dataTableFullName),
true);
+ }
+
+ String sql = "SELECT VIEW_COL11, VIEW_COL12 FROM %s ";
+ ResultSet rs1 =
conn.createStatement().executeQuery(String.format(sql, view1Name));
+ assertTrue(rs1.next());
+ assertEquals(101, rs1.getInt(1));
+ assertEquals("viewCol12", rs1.getString(2));
+
+ sql = "SELECT VIEW_COL21, VIEW_COL22 FROM %s ";
+ rs1 = conn.createStatement().executeQuery(String.format(sql,
view2Name));
+ assertTrue(rs1.next());
+ assertEquals(111, rs1.getInt(1));
+ assertEquals("viewCol22", rs1.getString(2));
+ }
+ }
+
+ @Test
+ public void testTransformForTenantViews() throws Exception {
+ String schemaName = generateUniqueName();
+ String dataTableName = generateUniqueName();
+ String dataTableFullName = SchemaUtil.getTableName(schemaName,
dataTableName);
+ String view1Name = "VW1_" + generateUniqueName();
+ String view2Name = "VW2_" + generateUniqueName();
+ String upsertQuery = "UPSERT INTO %s VALUES(?, ?, ?, ?, ?, ?)";
+
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ int numOfRows = 0;
+ createTableAndUpsertRows(conn, dataTableFullName, numOfRows,
tableDDLOptions);
+ SingleCellIndexIT.assertMetadata(conn,
PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN,
PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, dataTableFullName);
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ String createViewSql = "CREATE VIEW " + view1Name + " ( VIEW_COL11
INTEGER, VIEW_COL12 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=1";
+ tenantConn1.createStatement().execute(createViewSql);
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ String createViewSql = "CREATE VIEW " + view2Name + " ( VIEW_COL21
INTEGER, VIEW_COL22 VARCHAR ) AS SELECT * FROM "
+ + dataTableFullName + " where ID=11";
+ tenantConn2.createStatement().execute(createViewSql);
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ PreparedStatement stmt1 =
tenantConn1.prepareStatement(String.format(upsertQuery, view1Name));
+ stmt1.setInt(1, 1);
+ stmt1.setString(2, "uname1");
+ stmt1.setInt(3, 95051);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 101);
+ stmt1.setString(6, "viewCol12");
+ stmt1.executeUpdate();
+ tenantConn1.commit();
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ PreparedStatement stmt1 =
tenantConn2.prepareStatement(String.format(upsertQuery, view2Name));
+ stmt1.setInt(1, 11);
+ stmt1.setString(2, "uname11");
+ stmt1.setInt(3, 950511);
+ stmt1.setString(4, "");
+ stmt1.setInt(5, 111);
+ stmt1.setString(6, "viewCol22");
+ stmt1.executeUpdate();
+ tenantConn2.commit();
+ }
+
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute("ALTER TABLE " + dataTableFullName +
+ " SET
IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS,
COLUMN_ENCODED_BYTES=2");
+ SystemTransformRecord record =
Transform.getTransformRecord(schemaName, dataTableName, null, null,
conn.unwrap(PhoenixConnection.class));
+ assertNotNull(record);
+ assertMetadata(conn,
PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS,
PTable.QualifierEncodingScheme.TWO_BYTE_QUALIFIERS,
record.getNewPhysicalTableName());
+
+ List<String> args = getArgList(schemaName, dataTableName, null,
+ null, null, null, false, false, false, false, false);
+ runTransformTool(args.toArray(new String[0]), 0);
+ Transform.doCutover(conn.unwrap(PhoenixConnection.class), record);
+
Transform.updateTransformRecord(conn.unwrap(PhoenixConnection.class), record,
PTable.TransformStatus.COMPLETED);
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ admin.disableTable(TableName.valueOf(dataTableFullName));
+ admin.truncateTable(TableName.valueOf(dataTableFullName),
true);
+ }
+ }
+
+ try (Connection tenantConn1 = getTenantConnection("tenant1")) {
+ String sql = "SELECT VIEW_COL11, VIEW_COL12 FROM %s ";
+ ResultSet rs1 =
tenantConn1.createStatement().executeQuery(String.format(sql, view1Name));
+ assertTrue(rs1.next());
+ assertEquals(101, rs1.getInt(1));
+ assertEquals("viewCol12", rs1.getString(2));
+ }
+
+ try (Connection tenantConn2 = getTenantConnection("tenant2")) {
+ String sql = "SELECT VIEW_COL21, VIEW_COL22 FROM %s ";
+ ResultSet rs1 =
tenantConn2.createStatement().executeQuery(String.format(sql, view2Name));
+ assertTrue(rs1.next());
+ assertEquals(111, rs1.getInt(1));
+ assertEquals("viewCol22", rs1.getString(2));
+ }
+ }
+
+
+ public static Connection getTenantConnection(String tenant) throws
SQLException {
Review comment:
In TransformMonitorExtendedIT I believe you have updatable and
non-updatable tenant views that overlap, but since we skip transforming
read-only views, that's not quite the same thing.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
> TransformTool should transform the tenant view content as well
> --------------------------------------------------------------
>
> Key: PHOENIX-6649
> URL: https://issues.apache.org/jira/browse/PHOENIX-6649
> Project: Phoenix
> Issue Type: Sub-task
> Reporter: Gokcen Iskender
> Priority: Major
>
--
This message was sent by Atlassian Jira
(v8.20.1#820001)