ChinmaySKulkarni commented on a change in pull request #913:
URL: https://github.com/apache/phoenix/pull/913#discussion_r539644075
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
##########
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.end2end;
+import org.apache.phoenix.util.TestDDLUtil;
Review comment:
nit: unused import
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
Review comment:
nit: static import `assertFalse()` like you've done for `assertNotNull()`
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
##########
@@ -247,9 +195,10 @@ public void testMultiTenantViewLocalIndex() throws
Exception {
String indexName = "IND_" + generateUniqueName();
String fullTableName = SchemaUtil.getTableName(SCHEMA1, tableName);
String fullViewName = SchemaUtil.getTableName(SCHEMA2,
generateUniqueName());
-
- createBaseTable(SCHEMA1, tableName, true, null, null, true);
- Connection conn = DriverManager.getConnection(getUrl());
+
+ Connection conn = getConnection();
Review comment:
Is this a connection leak?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
+ null, builder.getTableOptions().getSchemaName(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTableIndexName()),
+ PTableType.INDEX,
+ ddlTimestamp);
+ }
+
+ //Note that local secondary indexes aren't supported because they go in
the same WALEdit as the
Review comment:
When you say "aren't supported", do you mean there should be no virtual
table name corresponding to the local index in the WAL annotations for the base
table? Can we add a test for this?
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java
##########
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+
+import java.util.Map;
+
+/**
+ * Utility functions shared between IndexRegionObserver and GlobalIndexChecker
for annotating the
+ * HBase WAL with Phoenix-level metadata about mutations.
+ */
+public class WALAnnotationUtil {
Review comment:
Since this is specific to indexing, can we rename the class to reflect
that?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
Review comment:
nit: static import `assertEquals()`
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
+ null, builder.getTableOptions().getSchemaName(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTableIndexName()),
+ PTableType.INDEX,
+ ddlTimestamp);
+ }
+
+ //Note that local secondary indexes aren't supported because they go in
the same WALEdit as the
+ // "base" table data they index.
+
+ private long upsertAndDeleteHelper(SchemaBuilder builder, boolean
createGlobalIndex) throws Exception {
+ try (Connection conn = getConnection()) {
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+
+ if (createGlobalIndex) {
+
builder.withTableOptions(tableOptions).withTableIndexDefaults().build();
+ } else {
+ builder.withTableOptions(tableOptions).build();
+ }
+
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', 'c')";
+ conn.createStatement().execute(upsertSql);
+ conn.commit();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change Detection Enabled is false!",
+ table.isChangeDetectionEnabled());
+ //Deleting by entire PK gets executed as more like an UPSERT
VALUES than an UPSERT SELECT
Review comment:
Isn't there a config that toggles client-side vs. server-side deletes?
Just curious, will setting this config to `client-side` cause some of these
tests to fail?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
Review comment:
nit: static import `assertTrue()`
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
Review comment:
nit: static import `fail()`
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
+ null, builder.getTableOptions().getSchemaName(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTableIndexName()),
+ PTableType.INDEX,
+ ddlTimestamp);
+ }
+
+ //Note that local secondary indexes aren't supported because they go in
the same WALEdit as the
+ // "base" table data they index.
+
+ private long upsertAndDeleteHelper(SchemaBuilder builder, boolean
createGlobalIndex) throws Exception {
+ try (Connection conn = getConnection()) {
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+
+ if (createGlobalIndex) {
+
builder.withTableOptions(tableOptions).withTableIndexDefaults().build();
+ } else {
+ builder.withTableOptions(tableOptions).build();
+ }
+
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', 'c')";
+ conn.createStatement().execute(upsertSql);
+ conn.commit();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change Detection Enabled is false!",
+ table.isChangeDetectionEnabled());
+ //Deleting by entire PK gets executed as more like an UPSERT
VALUES than an UPSERT SELECT
+ //(i.e, it generates the Mutations and then pushes them to server,
rather than
+ // running a select query and deleting the mutations returned)
+ String deleteSql = "DELETE FROM " + builder.getEntityTableName() +
" " +
+ "WHERE OID = 'a' AND KP = 'b'";
+ conn.createStatement().execute(deleteSql);
+ conn.commit();
+ return table.getLastDDLTimestamp();
Review comment:
Can you please add a comment here for clarity to mention that the
upsert/delete statements won't affect this timestamp, and that it reflects the
time the table was created in this case (since there is no other ALTER statement)?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
Review comment:
Do we want to do some upserts and call `getEntriesForTable()` again at
this point, just to ensure that entries exist now that change detection is
enabled?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
Review comment:
When we expect 0 `numOccurrences`, the `logicalName` and `ddlTimestamp` are
not used, right? Can we pass in null or modify `assertAnnotation()` to make
this clear?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
##########
@@ -278,10 +227,8 @@ public void testMultiTenantViewLocalIndex() throws
Exception {
stmt.setInt(5, 400);
stmt.execute();
conn.commit();
-
- Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
- props.setProperty("TenantId", "10");
- Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+ Connection conn1 = getTenantConnection("10");
Review comment:
Is this a connection leak?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
+ null, builder.getTableOptions().getSchemaName(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTableIndexName()),
+ PTableType.INDEX,
+ ddlTimestamp);
+ }
+
+ //Note that local secondary indexes aren't supported because they go in
the same WALEdit as the
+ // "base" table data they index.
+
+ private long upsertAndDeleteHelper(SchemaBuilder builder, boolean
createGlobalIndex) throws Exception {
+ try (Connection conn = getConnection()) {
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+
+ if (createGlobalIndex) {
+
builder.withTableOptions(tableOptions).withTableIndexDefaults().build();
+ } else {
+ builder.withTableOptions(tableOptions).build();
+ }
+
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', 'c')";
+ conn.createStatement().execute(upsertSql);
+ conn.commit();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change Detection Enabled is false!",
+ table.isChangeDetectionEnabled());
+ //Deleting by entire PK gets executed as more like an UPSERT
VALUES than an UPSERT SELECT
+ //(i.e, it generates the Mutations and then pushes them to server,
rather than
+ // running a select query and deleting the mutations returned)
+ String deleteSql = "DELETE FROM " + builder.getEntityTableName() +
" " +
+ "WHERE OID = 'a' AND KP = 'b'";
+ conn.createStatement().execute(deleteSql);
+ conn.commit();
+ return table.getLastDDLTimestamp();
+ }
+ }
+
+ private SchemaBuilder.TableOptions getTableOptions() {
+ SchemaBuilder.TableOptions tableOptions =
+ SchemaBuilder.TableOptions.withDefaults();
+ tableOptions.setImmutable(isImmutable);
+ tableOptions.setMultiTenant(isMultiTenant);
+ tableOptions.setChangeDetectionEnabled(true);
+ return tableOptions;
+ }
+
+ @Test
+ public void testUpsertSelectClientSide() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = getConnection()) {
+ SchemaBuilder baseBuilder = new SchemaBuilder(getUrl());
+ SchemaBuilder targetBuilder = new SchemaBuilder(getUrl());
+ //upsert selecting from a different table will force processing to
be client-side
+ baseBuilder.withTableOptions(getTableOptions()).build();
+ conn.createStatement().execute("UPSERT INTO " +
baseBuilder.getEntityTableName() + " " +
+ "VALUES" +
+ " ('a', 'b', '2', 'bc', '3')");
+ conn.commit();
+ targetBuilder.withTableOptions(getTableOptions()).build();
+ String sql = "UPSERT INTO " + targetBuilder.getEntityTableName() +
+ " (OID, KP, COL1, COL2, COL3) SELECT * FROM " +
baseBuilder.getEntityTableName();
+ conn.createStatement().execute(sql);
+ conn.commit();
+ int expectedAnnotations = 1;
+ verifyBaseAndTargetAnnotations(conn, baseBuilder, targetBuilder,
expectedAnnotations);
+ }
+ }
+
+ private void verifyBaseAndTargetAnnotations(Connection conn, SchemaBuilder
baseBuilder,
+ SchemaBuilder targetBuilder,
+ int expectedAnnotations)
throws SQLException, IOException {
+ PTable baseTable = PhoenixRuntime.getTableNoCache(conn,
+ baseBuilder.getEntityTableName());
+ assertAnnotation(expectedAnnotations,
baseBuilder.getPhysicalTableName(false), null,
+ baseBuilder.getTableOptions().getSchemaName(),
+ baseBuilder.getDataOptions().getTableName(),
+ PTableType.TABLE,
+ baseTable.getLastDDLTimestamp());
+ PTable targetTable = PhoenixRuntime.getTableNoCache(conn,
+ targetBuilder.getEntityTableName());
+ assertAnnotation(expectedAnnotations,
targetBuilder.getPhysicalTableName(false), null,
+ targetBuilder.getTableOptions().getSchemaName(),
targetBuilder.getDataOptions().getTableName(),
+ PTableType.TABLE, targetTable.getLastDDLTimestamp());
+ }
+
+ @Test
+ public void testUpsertSelectServerSide() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ Assume.assumeFalse(isImmutable); //only mutable tables can be
processed server-side
+ SchemaBuilder targetBuilder = new SchemaBuilder(getUrl());
+ try (Connection conn = getConnection()) {
+ targetBuilder.withTableOptions(getTableOptions()).build();
+ conn.createStatement().execute("UPSERT INTO " +
targetBuilder.getEntityTableName() + " " +
+ "VALUES" +
+ " ('a', 'b', '2', 'bc', '3')");
+ conn.commit();
+ conn.setAutoCommit(true); //required for server side execution
+
clearAnnotations(TableName.valueOf(targetBuilder.getPhysicalTableName(false)));
+ String sql = "UPSERT INTO " + targetBuilder.getEntityTableName() +
+ " (OID, KP, COL1, COL2, COL3) SELECT * FROM " +
targetBuilder.getEntityTableName();
+ conn.createStatement().execute(sql);
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
targetBuilder.getEntityTableName());
+ assertAnnotation(1, targetBuilder.getPhysicalTableName(false),
null,
+ targetBuilder.getTableOptions().getSchemaName(),
+ targetBuilder.getDataOptions().getTableName(),
+ PTableType.TABLE, table.getLastDDLTimestamp());
+ }
+
+ }
+
+ @Test
+ public void testGroupedUpsertSelect() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ //because we're inserting to a different table than we're selecting
from, this should be
+ // processed client-side
+ SchemaBuilder baseBuilder = new SchemaBuilder(getUrl());
+ SchemaBuilder targetBuilder = new SchemaBuilder(getUrl());
+ try (Connection conn = getConnection()) {
+ baseBuilder.withTableOptions(getTableOptions()).build();
+ targetBuilder.withTableOptions(getTableOptions()).build();
+ conn.createStatement().execute("UPSERT INTO " +
baseBuilder.getEntityTableName() + " VALUES" +
+ " ('a', 'b', '2', 'bc', '3')");
+ conn.commit();
+ String aggSql = "UPSERT INTO " +
targetBuilder.getEntityTableName() +
+ " SELECT OID, KP, MAX(COL1), MIN(COL2), MAX(COL3) FROM " +
baseBuilder.getEntityTableName() +
+ " GROUP BY OID, KP";
+ conn.createStatement().execute(aggSql);
+ conn.commit();
+ int expectedAnnotations = 1;
+ verifyBaseAndTargetAnnotations(conn, baseBuilder, targetBuilder,
expectedAnnotations);
+ }
+ }
+
+ @Test
+ public void testRangeDeleteServerSide() throws Exception {
+ boolean isClientSide = false;
+ testRangeDeleteHelper(isClientSide);
+ }
+
+ private void testRangeDeleteHelper(boolean isClientSide) throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableOptions(getTableOptions()).build();
+ try (Connection conn = getConnection()) {
+ conn.createStatement().execute("UPSERT INTO " +
builder.getEntityTableName() +
+ " VALUES ('a', 'b', '2', 'bc', '3')");
+ conn.commit();
+ //Deleting by a partial PK to so that it executes a SELECT and
then deletes the
+ // returned mutations
+ String sql = "DELETE FROM " + builder.getEntityTableName() + " " +
+ "WHERE OID = 'a' AND KP = 'b'";
+
+ if (isClientSide) {
+ sql += " LIMIT 1";
+ }
+ conn.setAutoCommit(!isClientSide);
+ conn.createStatement().execute(sql);
+ conn.commit();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ assertAnnotation(2, table.getPhysicalName().getString(), null,
+ table.getSchemaName().getString(),
+ table.getTableName().getString(), PTableType.TABLE,
table.getLastDDLTimestamp());
+ }
+
+ }
+
+ @Test
+ public void testRangeDeleteClientSide() throws Exception {
+ boolean isClientSide = true;
+ testRangeDeleteHelper(isClientSide);
+ }
+
+ @Test
+ public void testGlobalViewUpsert() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ try (Connection conn = getConnection()) {
+ createGlobalViewHelper(builder, conn);
+ conn.createStatement().execute("UPSERT INTO " +
builder.getEntityGlobalViewName()
+ + " VALUES" + " ('a', '" +
PhoenixTestBuilder.DDLDefaults.DEFAULT_KP +
+ "', '2', 'bc', '3', 'c')");
+ conn.commit();
+ String deleteSql = "DELETE FROM " +
builder.getEntityGlobalViewName() + " " +
+ "WHERE OID = 'a' AND KP = '" +
PhoenixTestBuilder.DDLDefaults.DEFAULT_KP + "' " +
+ "and ID = 'c'";
+ conn.createStatement().execute(deleteSql);
+ conn.commit();
+ PTable view = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityGlobalViewName());
+ assertAnnotation(2, view.getPhysicalName().getString(), null,
+ view.getSchemaName().getString(),
+ view.getTableName().getString(), PTableType.VIEW,
view.getLastDDLTimestamp());
+ }
+
+ }
+
+ private void createGlobalViewHelper(SchemaBuilder builder, Connection
conn) throws Exception {
+ builder.withTableOptions(getTableOptions()).
+ withGlobalViewOptions(getGlobalViewOptions(builder)).build();
+ PTable view = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityGlobalViewName());
+ Assert.assertTrue("View does not have change detection enabled!",
+ view.isChangeDetectionEnabled());
+ }
+
+ private SchemaBuilder.GlobalViewOptions getGlobalViewOptions(SchemaBuilder
builder) {
+ SchemaBuilder.GlobalViewOptions options =
SchemaBuilder.GlobalViewOptions.withDefaults();
+ options.setChangeDetectionEnabled(true);
+ return options;
+ }
+
+ @Test
+ public void testTenantViewUpsert() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ Assume.assumeTrue(isMultiTenant);
+ boolean createIndex = false;
+ tenantViewHelper(createIndex);
+ }
+
+ private void tenantViewHelper(boolean createIndex) throws Exception {
+ //create a base table, global view, and child tenant view, then insert
/ delete into the
+ // child tenant view. Make sure that the annotations use the tenant
view name
+ String tenant = generateUniqueName();
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ try (Connection conn = getConnection()) {
+ createGlobalViewHelper(builder, conn);
+ }
+ try (Connection conn = getTenantConnection(tenant)) {
+ SchemaBuilder.DataOptions dataOptions = builder.getDataOptions();
+ dataOptions.setTenantId(tenant);
+ if (createIndex) {
+ builder.withTenantViewOptions(getTenantViewOptions(builder)).
+
withDataOptions(dataOptions).withTenantViewIndexDefaults().build();
+ } else {
+ builder.withTenantViewOptions(getTenantViewOptions(builder)).
+ withDataOptions(dataOptions).build();
+ }
+ builder.withTenantViewOptions(getTenantViewOptions(builder)).
+
withDataOptions(dataOptions).withTenantViewIndexDefaults().build();
+ conn.createStatement().execute("UPSERT INTO " +
builder.getEntityTenantViewName()
+ + " VALUES" + " ('" +
PhoenixTestBuilder.DDLDefaults.DEFAULT_KP + "', '2', 'bc', " +
+ "'3', 'c', " + "'col4', 'col5', 'col6', 'd')");
+ conn.commit();
+ String deleteSql = "DELETE FROM " +
builder.getEntityTenantViewName() + " " +
+ "WHERE KP = '"+ PhoenixTestBuilder.DDLDefaults.DEFAULT_KP +
+ "' and COL1 = '2' AND ID = 'c' AND ZID = 'd'";
+ conn.createStatement().execute(deleteSql);
+ conn.commit();
+ PTable view = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTenantViewName());
+ assertAnnotation(2, view.getPhysicalName().getString(), tenant,
+ view.getSchemaName().getString(),
+ view.getTableName().getString(), PTableType.VIEW,
view.getLastDDLTimestamp());
+ if (createIndex) {
+ assertAnnotation(0,
+
MetaDataUtil.getViewIndexPhysicalName(builder.getEntityTableName()),
+ tenant, view.getSchemaName().getString(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTenantViewIndexName()),
+ PTableType.INDEX,
+ view.getLastDDLTimestamp());
+ }
+ }
+
+ }
+
+ private SchemaBuilder.TenantViewOptions getTenantViewOptions(SchemaBuilder
builder) {
+ SchemaBuilder.TenantViewOptions options =
SchemaBuilder.TenantViewOptions.withDefaults();
+ options.setChangeDetectionEnabled(true);
+ return options;
+ }
+
+ @Test
+ public void testTenantViewUpsertWithIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ Assume.assumeTrue(isMultiTenant);
+ tenantViewHelper(true);
+ }
+
+ private List<Map<String, byte[]>> getEntriesForTable(TableName tableName)
throws IOException {
+ AnnotatedWALObserver c = getTestCoprocessor(tableName);
+ List<Map<String, byte[]>> entries =
c.getWalAnnotationsByTable(tableName);
+ return entries != null ? entries : new ArrayList<Map<String,
byte[]>>();
+ }
+
+ private AnnotatedWALObserver getTestCoprocessor(TableName tableName)
throws IOException {
+ HRegionInfo info =
getUtility().getHBaseCluster().getRegions(tableName).get(0).getRegionInfo();
+ WAL wal =
getUtility().getHBaseCluster().getRegionServer(0).getWAL(info);
+ WALCoprocessorHost host = wal.getCoprocessorHost();
+ return (AnnotatedWALObserver)
host.findCoprocessor(AnnotatedWALObserver.class.getName());
+ }
+
+ private void clearAnnotations(TableName tableName) throws IOException {
+ AnnotatedWALObserver observer = getTestCoprocessor(tableName);
+ observer.clearAnnotations();
+ }
+
+ private void assertAnnotation(int numOccurrences, String
physicalTableName, String tenant,
+ String schemaName,
+ String logicalTableName,
+ PTableType tableType, long ddlTimestamp)
throws IOException {
Review comment:
So the idea is that the WAL entry corresponding to an upsert/delete on
an entity (table or view) will contain an annotation carrying the
`lastDDLTimestamp` of the entity, and this can be used against a potential
schema registry to find what the schema of the entity was at the point in time
when the mutation was issued on it?
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
##########
@@ -682,6 +687,49 @@ private void generateMutations(final TableRef tableRef,
final long mutationTimes
values.putAll(modifiedValues);
}
+ private void annotateMutationsWithMetadata(PTable table, List<Mutation>
rowMutations) {
+ //only annotate if the change detection flag is on the table and HBase
supports
+ // preWALAppend coprocs server-side
+ if (table == null || !table.isChangeDetectionEnabled()
+ || !HbaseCompatCapabilities.hasPreWALAppend()) {
+ return;
+ }
+ //annotate each mutation with enough metadata so that anyone
interested can
+ // deterministically figure out exactly what Phoenix schema object
created the mutation
+ // Server-side we can annotate the HBase WAL with these.
+ for (Mutation mutation : rowMutations) {
+ annotateMutationWithMetadata(table, mutation);
+ }
+
+ }
+
+ public static void annotateMutationWithMetadata(PTable table, Mutation
mutation) {
+ if (mutation.getDurability() == Durability.SKIP_WAL) {
Review comment:
I think we need to do this check everywhere. Also, can this be moved to
a relevant Util class since we'd want to potentially call this method in all
places where annotation is required?
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
##########
@@ -1257,7 +1261,11 @@ public ImmutableBytesPtr getEmptyKeyValueFamily() {
// If if there are no covered columns, we know it's our default name
return emptyKeyValueCFPtr;
}
-
+
+ public String getLogicalIndexName() {
+ return logicalIndexName;
Review comment:
Is this mainly for view indexes? For local indexes we aren't doing any
annotation, and for global indexes the logical name is the name of the PTable.
Can you please clarify and/or add a comment about this?
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
##########
@@ -1034,6 +1066,15 @@ private void
removeBatchMutateContext(ObserverContext<RegionCoprocessorEnvironme
this.batchMutateContext.remove();
}
+ @Override
+ public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> c,
WALKey key,
+ WALEdit edit) {
+ if (HbaseCompatCapabilities.hasPreWALAppend() && shouldWALAppend) {
Review comment:
I thought we weren't going to annotate indexes.
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
##########
@@ -50,11 +53,19 @@
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatGlobalIndexChecker;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
Review comment:
A lot of these imports seem to be unused
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java
##########
@@ -644,4 +651,33 @@ public boolean next(List<Cell> resultsToReturn) throws
IOException {
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
+
+ private void
annotateDataMutations(UngroupedAggregateRegionObserver.MutationList
mutationsList,
+ Scan scan) {
+ byte[] tenantId = null;
+ byte[] logicalTableName = null;
+ byte[] schemaName = null;
+ byte[] tableType = null;
+ byte[] ddlTimestamp = null;
+ tenantId =
+
scan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString());
+ schemaName =
+
scan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString());
+ logicalTableName =
+
scan.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString());
+ tableType =
+
scan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString());
+ ddlTimestamp =
scan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString());
+
+ for (Mutation m : mutationsList) {
Review comment:
We should check WAL durability here as well, right?
##########
File path:
phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
##########
@@ -139,7 +140,12 @@ public RegionServerServices getRegionServerServices() {
public ConcurrentMap<String, Object> getSharedData() {
return null;
}
- };
+
+ @Override
Review comment:
nit: Fix indentation.
Also, what is this change for?
##########
File path:
phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java
##########
@@ -644,6 +646,9 @@ public void upsertRows(int startRowIndex, int numRows)
throws Exception {
String entityTableName;
String entityGlobalViewName;
String entityTenantViewName;
+ String entityTableIndexName;
Review comment:
Can these variables be `private`?
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
##########
@@ -1160,6 +1169,12 @@ private PTable getTable(RegionScanner scanner, long
clientTimeStamp, long tableT
null :
PLong.INSTANCE.getCodec().decodeLong(lastDDLTimestampKv.getValueArray(),
lastDDLTimestampKv.getValueOffset(), SortOrder.getDefault());
+ Cell changeDetectionEnabledKv =
tableKeyValues[CHANGE_DETECTION_ENABLED_INDEX];
+ boolean isChangeDetectionEnabled = changeDetectionEnabledKv != null
Review comment:
If a new client modifies some table's CDE field, there won't be a
problem for old clients interacting with the table, right?
##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
##########
@@ -1239,4 +1239,21 @@ public static boolean isDummy(List<Cell> result) {
Cell cell = result.get(0);
return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY,
EMPTY_BYTE_ARRAY);
}
+
+ public static void setWALAnnotationAttributes(PTable table, Scan scan) {
Review comment:
@gjacoby126 Here is my understanding overall, please correct me if I'm
wrong. There are 2 basic paths:
1. (Client-side delete) Client issues Puts/Deletes directly: This is handled
in `MutationState.annotateMutationsWithMetadata()`
2. (Server-side delete and upsert-selects) Client issues scans, the server
intercepts this and issues Put/Delete mutations based on the results read: This
is what this method handles right? My understanding is we set the scan
attributes in UpsertCompiler and DeleteCompiler and this is intercepted in
`UngroupedAggregateRegionScanner` where we read the scan attribute and set it
on the mutations.
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
##########
@@ -674,6 +678,7 @@ private void generateMutations(final TableRef tableRef,
final long mutationTimes
mutation.setAttribute(PhoenixIndexBuilder.ATOMIC_OP_ATTRIB, onDupKeyBytes);
}
}
+ annotateMutationsWithMetadata(table, rowMutations);
Review comment:
+1, there is no difference between the if and else as far as annotation
goes, right?
##########
File path:
phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java
##########
@@ -1230,6 +1285,12 @@ private String buildCreateGlobalViewStmt(String
fullGlobalViewName, String fullT
.append((globalViewOptions.tableProps.isEmpty() ?
"" :
globalViewOptions.tableProps));
+ if (globalViewOptions.isChangeDetectionEnabled()) {
+ if (!globalViewOptions.tableProps.isEmpty()) {
+ statement.append(", ");
+ }
+ statement.append("CHANGE_DETECTION_ENABLED=true");
Review comment:
nit: Use a static constant representing CHANGE_DETECTION_ENABLED instead
##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
##########
@@ -1239,4 +1239,21 @@ public static boolean isDummy(List<Cell> result) {
Cell cell = result.get(0);
return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY,
EMPTY_BYTE_ARRAY);
}
+
+ public static void setWALAnnotationAttributes(PTable table, Scan scan) {
Review comment:
Can we add comments based on this in the compiler and/or this method.
Also a similar comment in MutationState would be helpful.
##########
File path:
phoenix-hbase-compat-1.5.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatGlobalIndexChecker.java
##########
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase.coprocessor;
+
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+
+public class CompatGlobalIndexChecker extends BaseRegionObserver {
+ //HBase 1.5+ has preWALAppend already declared in BaseRegionObserver, so
this class is only
Review comment:
Super nit: multi-line comments throughout the PR have no space for the
first line and 1 space for subsequent ones. Maybe keep it the same for all
lines, unless this is some convention I'm unaware of, in which case ignore :D
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
##########
@@ -682,6 +687,49 @@ private void generateMutations(final TableRef tableRef,
final long mutationTimes
values.putAll(modifiedValues);
}
+ private void annotateMutationsWithMetadata(PTable table, List<Mutation>
rowMutations) {
+ //only annotate if the change detection flag is on the table and HBase
supports
+ // preWALAppend coprocs server-side
+ if (table == null || !table.isChangeDetectionEnabled()
+ || !HbaseCompatCapabilities.hasPreWALAppend()) {
+ return;
+ }
+ //annotate each mutation with enough metadata so that anyone
interested can
+ // deterministically figure out exactly what Phoenix schema object
created the mutation
+ // Server-side we can annotate the HBase WAL with these.
+ for (Mutation mutation : rowMutations) {
+ annotateMutationWithMetadata(table, mutation);
+ }
+
+ }
+
+ public static void annotateMutationWithMetadata(PTable table, Mutation
mutation) {
+ if (mutation.getDurability() == Durability.SKIP_WAL) {
Review comment:
Maybe `WALAnnotationUtil`?
##########
File path:
phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
##########
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.PhoenixTestBuilder.SchemaBuilder;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+
+import static
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class WALAnnotationIT extends BaseUniqueNamesOwnClusterIT {
+ private final boolean isImmutable;
+ private final boolean isMultiTenant;
+
+ // name is used by failsafe as file name in reports
+ @Parameterized.Parameters(name =
"WALAnnotationIT_isImmutable={0}_isMultiTenant={1}")
+ public static synchronized Collection<Object[]> data() {
+ return Arrays.asList(new Object[]{true, true}, new Object[]{true,
false},
+ new Object[]{false, true}, new Object[]{false, false});
+ }
+
+ public WALAnnotationIT(boolean isImmutable, boolean isMultiTenant) {
+ this.isImmutable = isImmutable;
+ this.isMultiTenant = isMultiTenant;
+ }
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = new HashMap<>(2);
+ props.put("hbase.coprocessor.wal.classes",
+ AnnotatedWALObserver.class.getName());
+ props.put(IndexRegionObserver.PHOENIX_APPEND_METADATA_TO_WAL, "true");
+ props.put(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "true");
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testSimpleUpsertAndDelete() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = false;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ }
+
+ @Test
+ public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+ tableOptions.setChangeDetectionEnabled(false);
+ builder.withTableOptions(tableOptions).build();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it shouldn't
be!",
+ table.isChangeDetectionEnabled());
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', '2', 'bc', '3')";
+ conn.createStatement().execute(upsertSql);
+ List<Map<String, byte[]>> entries =
+
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ //now flip to TRUE so we can test disabling it
+ String enableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(enableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change detection is disabled when it should be
enabled!",
+ table.isChangeDetectionEnabled());
+ //set to FALSE
+ String disableSql =
+ "ALTER TABLE " + builder.getEntityTableName() +
+ " SET " + CHANGE_DETECTION_ENABLED + "=FALSE";
+ conn.createStatement().execute(disableSql);
+ table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertFalse("Change detection is enabled when it should be
disabled!",
+ table.isChangeDetectionEnabled());
+ //now upsert again
+ conn.createStatement().execute(upsertSql);
+ //check that we still didn't annotate anything
+ entries =
getEntriesForTable(TableName.valueOf(builder.getPhysicalTableName(false)));
+ Assert.assertEquals(0, entries.size());
+ }
+ }
+
+ @Test
+ public void testCantSetChangeDetectionOnIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ builder.withTableDefaults().build();
+ try {
+ String badIndexSql =
+ "CREATE INDEX IDX_SHOULD_FAIL" + " ON " +
builder.getEntityTableName() +
+ "(COL1) "
+ + CHANGE_DETECTION_ENABLED + "=TRUE";
+ conn.createStatement().execute(badIndexSql);
+ Assert.fail("Didn't throw a SQLException for setting change
detection on an " +
+ "index at create time!");
+ } catch (SQLException se) {
+ TestUtil.assertSqlExceptionCode(
+
SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY, se);
+ }
+ }
+ }
+
+ @Test
+ public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
+ Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
+ SchemaBuilder builder = new SchemaBuilder(getUrl());
+ boolean createGlobalIndex = true;
+ long ddlTimestamp = upsertAndDeleteHelper(builder, createGlobalIndex);
+ assertAnnotation(2, builder.getPhysicalTableName(false), null,
+ builder.getTableOptions().getSchemaName(),
+ builder.getDataOptions().getTableName(), PTableType.TABLE,
ddlTimestamp);
+ assertAnnotation(0, builder.getPhysicalTableIndexName(false),
+ null, builder.getTableOptions().getSchemaName(),
+
SchemaUtil.getTableNameFromFullName(builder.getEntityTableIndexName()),
+ PTableType.INDEX,
+ ddlTimestamp);
+ }
+
+ //Note that local secondary indexes aren't supported because they go in
the same WALEdit as the
+ // "base" table data they index.
+
+ private long upsertAndDeleteHelper(SchemaBuilder builder, boolean
createGlobalIndex) throws Exception {
+ try (Connection conn = getConnection()) {
+ SchemaBuilder.TableOptions tableOptions = getTableOptions();
+
+ if (createGlobalIndex) {
+
builder.withTableOptions(tableOptions).withTableIndexDefaults().build();
+ } else {
+ builder.withTableOptions(tableOptions).build();
+ }
+
+ String upsertSql = "UPSERT INTO " + builder.getEntityTableName() +
" VALUES" +
+ " ('a', 'b', 'c')";
+ conn.createStatement().execute(upsertSql);
+ conn.commit();
+ PTable table = PhoenixRuntime.getTableNoCache(conn,
builder.getEntityTableName());
+ Assert.assertTrue("Change Detection Enabled is false!",
+ table.isChangeDetectionEnabled());
+ //Deleting by entire PK gets executed as more like an UPSERT
VALUES than an UPSERT SELECT
Review comment:
Even if the delete is issued as a select from the client and delete
mutations are issued for those, we would still get the same mutations as if the
client had directly issued DELETE mutations, right? And then WAL annotations
wouldn't change either in both scenarios, right?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]