This is an automated email from the ASF dual-hosted git repository.
vjasani pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/5.1 by this push:
new b99c271eda PHOENIX-6985 Setting server-side masking flag default to false (#1634)
b99c271eda is described below
commit b99c271eda69c5ca8e25624b9a8bb2d2c756320c
Author: Lokesh Khurana <[email protected]>
AuthorDate: Tue Jun 27 22:14:05 2023 -0700
PHOENIX-6985 Setting server-side masking flag default to false (#1634)
---
.../apache/phoenix/end2end/PhoenixTTLToolIT.java | 15 +++
.../java/org/apache/phoenix/end2end/ViewTTLIT.java | 17 ++++
.../phoenix/end2end/ViewTTLNotEnabledIT.java | 105 +++++++++++++++++++++
.../phoenix/query/ConnectionQueryServicesImpl.java | 5 +-
.../apache/phoenix/query/QueryServicesOptions.java | 2 +-
5 files changed, 142 insertions(+), 2 deletions(-)
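
With this change, server-side TTL masking becomes opt-in: ConnectionQueryServicesImpl only attaches the PhoenixTTLRegionObserver coprocessor when QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED is true, and DEFAULT_SERVER_SIDE_MASKING_ENABLED in QueryServicesOptions is now false. Below is a minimal sketch of how a test could opt back in, mirroring the ReadOnlyProps setup the ITs in this diff add; the class and method names here are hypothetical, not part of the commit.

    import java.util.Map;

    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
    import org.apache.phoenix.util.ReadOnlyProps;

    public class ServerSideMaskingOptIn {
        // Hypothetical helper mirroring the doSetup() pattern in the ITs below:
        // masking is now disabled by default, so tests or clusters that rely on
        // it must set the flag explicitly in their configuration.
        static ReadOnlyProps maskingEnabledProps() {
            Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
            props.put(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED,
                    Boolean.toString(true));
            return new ReadOnlyProps(props.entrySet().iterator());
        }
    }
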
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixTTLToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixTTLToolIT.java
index d1d101f287..f35f933158 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixTTLToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixTTLToolIT.java
@@ -26,10 +26,15 @@ import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.mapreduce.PhoenixTTLTool;
import org.apache.phoenix.mapreduce.util.PhoenixMultiInputUtil;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -37,6 +42,7 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
+import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -44,6 +50,15 @@ import static org.junit.Assert.assertTrue;
@Category(NeedsOwnMiniClusterTest.class)
public class PhoenixTTLToolIT extends ParallelStatsDisabledIT {
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
+ props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(false));
+ props.put(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, Boolean.toString(true));
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
private final long PHOENIX_TTL_EXPIRE_IN_A_SECOND = 1;
private final long MILLISECOND = 1000;
private final long PHOENIX_TTL_EXPIRE_IN_A_DAY = 1000 * 60 * 60 * 24;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
index 0c66c11dfc..34eedbf8f8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
@@ -61,10 +61,12 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
import org.junit.Assert;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
@@ -77,6 +79,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -120,6 +123,20 @@ public class ViewTTLIT extends ParallelStatsDisabledIT {
private static final String COL8_FMT = "h%05d";
private static final String COL9_FMT = "i%05d";
+ protected static void setUpTestDriver(ReadOnlyProps props) throws Exception {
+ setUpTestDriver(props, props);
+ }
+
+ @BeforeClass
+ public static final void doSetup() throws Exception {
+ // Turn on the PHOENIX_TTL feature
+ Map<String, String> DEFAULT_PROPERTIES = new HashMap<String, String>() {{
+ put(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, String.valueOf(true));
+ }};
+
+ setUpTestDriver(new ReadOnlyProps(ReadOnlyProps.EMPTY_PROPS, DEFAULT_PROPERTIES.entrySet().iterator()));
+ }
+
// Scans the HBase rows directly and asserts
private void assertUsingHBaseRows(byte[] hbaseTableName,
long minTimestamp, int expectedRows) throws IOException, SQLException {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java
new file mode 100644
index 0000000000..f0e549149a
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLNotEnabledIT.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
+import org.apache.phoenix.util.ScanUtil;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.Properties;
+
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class ViewTTLNotEnabledIT extends ParallelStatsDisabledIT {
+
+ @Test
+ public void testPhoenixTTLNotEnabled() throws Exception {
+
+ // PHOENIX TTL is set in seconds (for e.g 10 secs)
+ long phoenixTTL = 10;
+ PhoenixTestBuilder.SchemaBuilder.TableOptions
+ tableOptions = PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults();
+ tableOptions.getTableColumns().clear();
+ tableOptions.getTableColumnTypes().clear();
+
+ PhoenixTestBuilder.SchemaBuilder.TenantViewOptions
+ tenantViewOptions = PhoenixTestBuilder.SchemaBuilder.TenantViewOptions.withDefaults();
+ tenantViewOptions.setTableProps(String.format("PHOENIX_TTL=%d", phoenixTTL));
+
+ // Define the test schema.
+ final PhoenixTestBuilder.SchemaBuilder schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(url);
+ schemaBuilder
+ .withTableOptions(tableOptions)
+ .withTenantViewOptions(tenantViewOptions)
+ .build();
+
+ String viewName = schemaBuilder.getEntityTenantViewName();
+
+ Properties props = new Properties();
+ String tenantConnectUrl =
+ url + ';' + TENANT_ID_ATTRIB + '=' + schemaBuilder.getDataOptions().getTenantId();
+
+ // Test the coproc is not registered
+ org.apache.hadoop.hbase.client.Connection hconn = getUtility().getConnection();
+ Admin admin = hconn.getAdmin();
+ HTableDescriptor tableDescriptor = admin.getTableDescriptor(
+ TableName.valueOf(schemaBuilder.getEntityTableName()));
+ Assert.assertFalse("Coprocessor " +
PhoenixTTLRegionObserver.class.getName()
+ + " should not have been added: ",
+ tableDescriptor.hasCoprocessor(PhoenixTTLRegionObserver.class.getName()));
+
+
+ // Test masking expired rows property are not set
+ try (Connection conn = DriverManager.getConnection(tenantConnectUrl, props);
+ final Statement statement = conn.createStatement()) {
+ conn.setAutoCommit(true);
+
+ final String stmtString = String.format("select * from %s", viewName);
+ Preconditions.checkNotNull(stmtString);
+ final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
+ final QueryPlan queryPlan = pstmt.optimizeQuery(stmtString);
+
+ PhoenixResultSet
+ rs = pstmt.newResultSet(queryPlan.iterator(), queryPlan.getProjector(), queryPlan.getContext());
+ Assert.assertFalse("Should not have any rows", rs.next());
+ Assert.assertEquals("Should have at least one element", 1,
queryPlan.getScans().size());
+ Assert.assertEquals("PhoenixTTL should not be set",
+ 0, ScanUtil.getPhoenixTTL(queryPlan.getScans().get(0).get(0)));
+ Assert.assertFalse("Masking attribute should not be set",
+ ScanUtil.isMaskTTLExpiredRows(queryPlan.getScans().get(0).get(0)));
+ Assert.assertFalse("Delete Expired attribute should not set",
+ ScanUtil.isDeleteTTLExpiredRows(queryPlan.getScans().get(0).get(0)));
+ }
+ }
+
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c0fa6d8bc5..63fc1a3994 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1014,6 +1014,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
QueryServicesOptions.DEFAULT_INDEX_REGION_OBSERVER_ENABLED);
boolean isViewIndex = TRUE_BYTES_AS_STRING
.equals(tableProps.get(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME));
+ boolean isServerSideMaskingEnabled = config.getBoolean(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED,
+ QueryServicesOptions.DEFAULT_SERVER_SIDE_MASKING_ENABLED);
boolean isViewBaseTransactional = false;
if (!isTransactional && isViewIndex) {
@@ -1177,7 +1179,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// The priority for this co-processor should be set higher than the GlobalIndexChecker so that the read repair scans
// are intercepted by the TTLAwareRegionObserver and only the rows that are not ttl-expired are returned.
if (!SchemaUtil.isSystemTable(tableName)) {
- if (!newDesc.hasCoprocessor(PhoenixTTLRegionObserver.class.getName())) {
+ if (!newDesc.hasCoprocessor(PhoenixTTLRegionObserver.class.getName()) &&
+ isServerSideMaskingEnabled) {
builder.addCoprocessor(
PhoenixTTLRegionObserver.class.getName(), null, priority-2, null);
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7c73e3d342..c3cabc6b32 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -158,7 +158,7 @@ public class QueryServicesOptions {
public static final int DEFAULT_TRACING_TRACE_BUFFER_SIZE = 1000;
public static final int DEFAULT_MAX_INDEXES_PER_TABLE = 10;
public static final int DEFAULT_CLIENT_INDEX_ASYNC_THRESHOLD = 0;
- public static final boolean DEFAULT_SERVER_SIDE_MASKING_ENABLED = true;
+ public static final boolean DEFAULT_SERVER_SIDE_MASKING_ENABLED = false;
public final static int DEFAULT_MUTATE_BATCH_SIZE = 100; // Batch size for UPSERT SELECT and DELETE
//Batch size in bytes for UPSERT, SELECT and DELETE. By default, 2MB