[ 
https://issues.apache.org/jira/browse/PHOENIX-5435?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17229577#comment-17229577
 ] 

ASF GitHub Bot commented on PHOENIX-5435:
-----------------------------------------

ChinmaySKulkarni commented on a change in pull request #913:
URL: https://github.com/apache/phoenix/pull/913#discussion_r520944121



##########
File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
##########
@@ -98,4 +99,13 @@ public void 
testQueryWithMultipleColumnFamiliesAndMultipleConditionsForImmutable
             }
         }
     }
+
+    @Test
+    public void testImmutableTableCreation() throws Exception {

Review comment:
       We already have immutable table creation in other tests in this class. 
Do we need this test?

##########
File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestDDLUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+    private TestDDLUtil ddlUtil;
+    private boolean isImmutable;
+    private boolean isMultiTenant;
+
+    // name is used by failsafe as file name in reports
+    @Parameterized.Parameters(name = 
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+    public static synchronized Collection<Object[]> data() {
+        return Arrays.asList(new Object[]{true, true}, new Object[]{true, 
false},
+            new Object[]{false, true}, new Object[]{false, false});
+    }
+
+    public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+        this.isImmutable = isImmutable;
+        this.isMultiTenant = isMultiTenant;
+        ddlUtil = new TestDDLUtil(false);
+    }
+
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        Map<String, String> props = new HashMap<String, String>(2);
+        props.put("hbase.coprocessor.wal.classes",
+            AnnotatedWALObserver.class.getName());
+        props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+        props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
+
+    @Test
+    public void testSimpleUpsertAndDelete() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        TestTableInfo tableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        boolean createGlobalIndex = false;
+        upsertAndDeleteHelper(tableInfo, createGlobalIndex);
+        assertAnnotation(2, tableInfo.getPhysicalTableName(), null, 
tableInfo.getSchemaName(),
+            tableInfo.getTableName(), PTableType.TABLE, minTimestamp, true);
+    }
+
+    @Test
+    public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        TestTableInfo tableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        boolean createGlobalIndex = true;
+        upsertAndDeleteHelper(tableInfo, createGlobalIndex);
+        assertAnnotation(2, tableInfo.getPhysicalTableName(), null, 
tableInfo.getSchemaName(),
+            tableInfo.getTableName(), PTableType.TABLE, minTimestamp, true);
+        assertAnnotation(2, tableInfo.getPhysicalGlobalIndexName(), null, 
tableInfo.getSchemaName(),
+            tableInfo.getIndexName(), PTableType.INDEX, minTimestamp, true);
+    }
+
+    //Note that local secondary indexes aren't supported because they go in 
the same WALEdit as the
+    // "base" table data they index.
+
+    private void upsertAndDeleteHelper(TestTableInfo tableInfo, boolean 
createGlobalIndex) throws SQLException {
+        try (Connection conn = getConnection()) {
+            ddlUtil.createBaseTable(conn, tableInfo.getSchemaName(), 
tableInfo.getTableName(),
+                isMultiTenant, null, null, isImmutable);
+            if (createGlobalIndex) {
+                ddlUtil.createIndex(conn, tableInfo.getSchemaName(), 
tableInfo.getIndexName(),
+                    tableInfo.getTableName(), "v1", false, false);
+            }
+            String upsertSql = "UPSERT INTO " + tableInfo.getFullTableName() + 
" VALUES" +
+                " ('a', 'b', 2, 'bc', 3)";
+            conn.createStatement().execute(upsertSql);
+            conn.commit();
+            //Deleting by the entire PK gets executed more like an UPSERT 
VALUES than an UPSERT SELECT
+            //(i.e, it generates the Mutations and then pushes them to server, 
rather than
+            // running a select query and deleting the mutations returned)
+            String deleteSql = "DELETE FROM " + tableInfo.getFullTableName() + 
" " +
+                "WHERE t_id = 'a' AND k1 = 'b'";
+            conn.createStatement().execute(deleteSql);
+            conn.commit();
+        }
+    }
+
+    @Test
+    public void testUpsertSelectClientSide() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        TestTableInfo baseTableInfo = new TestTableInfo();
+        TestTableInfo targetTableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = getConnection()) {
+            //upsert selecting from a different table will force processing to 
be client-side
+            ddlUtil.createBaseTable(conn, baseTableInfo.getSchemaName(), 
baseTableInfo.getTableName(),
+                isMultiTenant, null, null, isImmutable);
+            conn.createStatement().execute("UPSERT INTO " + 
baseTableInfo.getFullTableName() + " VALUES" +
+                " ('a', 'b', 2, 'bc', 3)");
+            conn.commit();
+            ddlUtil.createBaseTable(conn, targetTableInfo.getSchemaName(),
+                targetTableInfo.getTableName(), isMultiTenant, null, null, 
isImmutable);
+            String sql = "UPSERT INTO " + targetTableInfo.getFullTableName() +
+                " (t_id, k1, k2, v1, v2) SELECT * FROM " + 
baseTableInfo.getFullTableName();
+            conn.createStatement().execute(sql);
+            conn.commit();
+        }
+        assertAnnotation(1, baseTableInfo.getPhysicalTableName(), null,
+            baseTableInfo.getSchemaName(), baseTableInfo.getTableName(), 
PTableType.TABLE,
+            minTimestamp, true);
+        assertAnnotation(1, targetTableInfo.getPhysicalTableName(), null,
+            targetTableInfo.getSchemaName(), targetTableInfo.getTableName(),
+            PTableType.TABLE, minTimestamp, true);
+    }
+
+    @Test
+    public void testUpsertSelectServerSide() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        Assume.assumeFalse(isImmutable); //only mutable tables can be 
processed server-side
+        TestTableInfo targetTableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = getConnection()) {
+            ddlUtil.createBaseTable(conn, targetTableInfo.getSchemaName(),
+                targetTableInfo.getTableName(), isMultiTenant, null, null, 
isImmutable);
+            conn.createStatement().execute("UPSERT INTO " + 
targetTableInfo.getFullTableName() + " VALUES" +
+                " ('a', 'b', 2, 'bc', 3)");
+            conn.commit();
+            conn.setAutoCommit(true); //required for server side execution
+            
clearAnnotations(TableName.valueOf(SchemaUtil.getPhysicalHBaseTableName(
+                targetTableInfo.getSchemaName(), 
targetTableInfo.getTableName(),
+                false).getString()));
+            String sql = "UPSERT INTO " + targetTableInfo.getFullTableName() +
+                " (t_id, k1, k2, v1, v2) SELECT * FROM " + 
targetTableInfo.getFullTableName();
+            conn.createStatement().execute(sql);
+        }
+        assertAnnotation(1, targetTableInfo.getPhysicalTableName(), null,
+            targetTableInfo.getSchemaName(), targetTableInfo.getTableName(),
+            PTableType.TABLE, minTimestamp, true);
+    }
+
+    @Test
+    public void testGroupedUpsertSelect() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        //because we're inserting to a different table than we're selecting 
from, this should be
+        // processed client-side
+        TestTableInfo baseTableInfo = new TestTableInfo();
+        TestTableInfo targetTableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = getConnection()) {
+            ddlUtil.createBaseTable(conn, baseTableInfo.getSchemaName(),
+                baseTableInfo.getTableName(), isMultiTenant, null, null, 
isImmutable);
+            ddlUtil.createBaseTable(conn, targetTableInfo.getSchemaName(),
+                targetTableInfo.getTableName(), isMultiTenant, null, null, 
isImmutable);
+            conn.createStatement().execute("UPSERT INTO " + 
baseTableInfo.getFullTableName() + " VALUES" +
+                " ('a', 'b', 2, 'bc', 3)");
+            conn.commit();
+            String aggSql = "UPSERT INTO " + 
targetTableInfo.getFullTableName() +
+                " SELECT t_id, k1, k2, MIN(v1), AVG(v2) FROM " + 
baseTableInfo.getFullTableName() +
+                " GROUP BY t_id, k1, k2";
+            conn.createStatement().execute(aggSql);
+            conn.commit();
+        }
+        assertAnnotation(1, baseTableInfo.getPhysicalTableName(), null,
+            baseTableInfo.getSchemaName(), baseTableInfo.getTableName(),
+            PTableType.TABLE, minTimestamp, true);
+        assertAnnotation(1, targetTableInfo.getPhysicalTableName(), null,
+            targetTableInfo.getSchemaName(), targetTableInfo.getTableName(),
+            PTableType.TABLE, minTimestamp, true);
+    }
+
+    @Test
+    public void testRangeDeleteServerSide() throws Exception {
+        boolean isClientSide = false;
+        testRangeDeleteHelper(isClientSide);
+    }
+
+    private void testRangeDeleteHelper(boolean isClientSide) throws 
SQLException, IOException {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        TestTableInfo tableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = getConnection()) {
+            ddlUtil.createBaseTable(conn, tableInfo.getSchemaName(), 
tableInfo.getTableName(),
+                isMultiTenant, null, null, isImmutable);
+            conn.createStatement().execute("UPSERT INTO " + 
tableInfo.getFullTableName() + " VALUES" +
+                " ('a', 'b', 2, 'bc', 3)");
+            conn.commit();
+            //Deleting by a partial PK so that it executes a SELECT and 
then deletes the
+            // returned mutations
+            String sql = "DELETE FROM " + tableInfo.getFullTableName() + " " +
+                "WHERE t_id = 'a' AND k1 = 'b'";
+
+            if (isClientSide) {
+                sql += " LIMIT 1";
+            }
+            conn.setAutoCommit(!isClientSide);
+            conn.createStatement().execute(sql);
+            conn.commit();
+        }
+        assertAnnotation(2, tableInfo.getPhysicalTableName(), null, 
tableInfo.getSchemaName(),
+            tableInfo.getTableName(), PTableType.TABLE, minTimestamp, true);
+    }
+
+    @Test
+    public void testRangeDeleteClientSide() throws Exception {
+        boolean isClientSide = true;
+        testRangeDeleteHelper(isClientSide);
+    }
+
+    @Test
+    public void testGlobalViewUpsert() throws Exception {
+        Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+        TestTableInfo tableInfo = new TestTableInfo();
+        long minTimestamp = EnvironmentEdgeManager.currentTimeMillis();
+        try (Connection conn = getConnection()) {
+            createGlobalViewHelper(tableInfo, conn);
+            conn.createStatement().execute("UPSERT INTO " + 
tableInfo.getFullViewName()
+                + " VALUES" + " ('a', 'b', 2, 'bc', 3)");
+            conn.commit();
+            String deleteSql = "DELETE FROM " + tableInfo.getFullViewName() + 
" " +
+                "WHERE t_id = 'a' AND k1 = 'b' and k2 = 2";
+            conn.createStatement().execute(deleteSql);
+            conn.commit();        }
+        assertAnnotation(2, tableInfo.getPhysicalTableName(), null, 
tableInfo.getSchemaName(),
+            tableInfo.getViewName(), PTableType.VIEW, minTimestamp, true);
+    }
+    private void createGlobalViewHelper(TestTableInfo tableInfo, Connection 
conn) throws SQLException {

Review comment:
       nit: add a newline

##########
File path: phoenix-core/src/test/java/org/apache/phoenix/util/TestDDLUtil.java
##########
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+public class TestDDLUtil {

Review comment:
       I might be wrong here, but don't we already have a class like this 
introduced by @jpisaac for creating test tables, indices, etc. and test data 
too?

##########
File path: pom.xml
##########
@@ -1173,7 +1173,7 @@
     </profile>
     <!-- See BUILDING.md for profile selection -->
     <profile>
-      <id>phoenix-hbase-compat-1.3.0-default</id>
+      <id>phoenix-hbase-compat-1.5.0-default</id>

Review comment:
       Why did we change this?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


> Annotate HBase WALs with Phoenix Metadata
> -----------------------------------------
>
>                 Key: PHOENIX-5435
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-5435
>             Project: Phoenix
>          Issue Type: Sub-task
>            Reporter: Geoffrey Jacoby
>            Assignee: Geoffrey Jacoby
>            Priority: Major
>         Attachments: PHOENIX-5435-4.x.patch
>
>
> HBase write-ahead-logs (WALs) drive not only failure recovery, but HBase 
> replication and some HBase backup frameworks. The WALs contain HBase-level 
> metadata such as table and region, but lack Phoenix-level metadata. That 
> means that it's quite difficult to build correct logic that needs to know 
> about Phoenix-level constructs such as multi-tenancy, views, or indexes. 
> HBASE-22622 and HBASE-22623 add the capacity for coprocessors to annotate 
> extra key/value pairs of metadata into the HBase WAL. We should have the 
> option to annotate into the WAL the tuple <tenant_id, table-or-view-name, 
> timestamp>, or some hashed value from which that tuple can be reconstructed. It should have a 
> feature toggle so operators who don't need it don't bear the slight extra 
> storage cost. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to