[37/50] [abbrv] phoenix git commit: Revert "PHOENIX-4790 Simplify check for client side delete"

2018-07-25 Thread elserj
Revert "PHOENIX-4790 Simplify check for client side delete"

This reverts commit d35a7519e65174978657b4c6254d595fac1b0009.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bc4ca79e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bc4ca79e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bc4ca79e

Branch: refs/heads/4.x-HBase-1.4
Commit: bc4ca79ee0f7c1074f390c8ad8cd7b0bc51169f3
Parents: 164b2da
Author: James Taylor 
Authored: Thu Jul 12 19:53:04 2018 -0700
Committer: James Taylor 
Committed: Thu Jul 12 19:53:22 2018 -0700

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 
 1 file changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc4ca79e/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..5f9c76c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,6 +46,7 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -480,7 +481,6 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
 }
-boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
@@ -492,8 +492,6 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
-// We must run a query if any index contains a non pk column
-noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
 }
@@ -513,7 +511,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
-noQueryReqd &= !hasPreOrPostProcessing;
+boolean noQueryReqd = !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row keys for the data
 // that is being upserted for conflict detection purposes.
@@ -552,8 +550,24 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
-
+
+// We need to have all indexed columns available in all immutable indexes in order
+// to generate the delete markers from the query. We also cannot have any filters
+// except for our SkipScanFilter for point lookups.
+// A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
+// may have been optimized out. Instead, we check that there's a single SkipScanFilter
+// If we can generate a plan for every index, that means all the required columns are available in every index,
+// hence we can drive the delete from any of the plans.
 noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
+int queryPlanIndex = 0;
+while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+QueryPlan plan = queryPlans.get(queryPlanIndex++);
+StatementContext context = plan.getContext();
+noQueryReqd &= (!context.getScan().hasFilter()
+  

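The archived diff is cut off above, mid-expression. As a minimal sketch of the per-plan check the restored comments describe (not the verbatim reverted code; ScanRanges#isPointLookup() and the exact boolean grouping are assumptions drawn from the comment text), the loop can be read as:

import java.util.List;

import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.filter.SkipScanFilter;

final class ClientSideDeleteCheckSketch {
    // Sketch only: returns true if the delete markers can be generated directly
    // from the compiled plans, without running the query.
    static boolean noQueryRequired(List<QueryPlan> queryPlans, boolean noQueryReqd) {
        int queryPlanIndex = 0;
        while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
            QueryPlan plan = queryPlans.get(queryPlanIndex++);
            StatementContext context = plan.getContext();
            // Either no filter at all, or only the SkipScanFilter used for point lookups
            // (assumption: any other filter forces a real query).
            boolean onlySkipScanFilter = !context.getScan().hasFilter()
                    || context.getScan().getFilter() instanceof SkipScanFilter;
            // The scan ranges must resolve to point lookups so the row keys to delete
            // are fully known at compile time (isPointLookup() is assumed here).
            noQueryReqd &= onlySkipScanFilter && context.getScanRanges().isPointLookup();
        }
        return noQueryReqd;
    }
}

Checking the compiled scan rather than the parsed WHERE clause matches the intent of the comments: a WHERE clause that was optimized into the scan's key ranges leaves no trace in the parse node, but the resulting SkipScanFilter and point-lookup ranges are still visible on the plan.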
[37/50] [abbrv] phoenix git commit: Revert "PHOENIX-4790 Simplify check for client side delete"

2018-07-25 Thread elserj
Revert "PHOENIX-4790 Simplify check for client side delete"

This reverts commit 35366b37106833b43f69ed712e0e3fd1635842cb.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0af8b1e3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0af8b1e3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0af8b1e3

Branch: refs/heads/master
Commit: 0af8b1e32c940fe14a66f23240013b4d702d8ec6
Parents: 2b43bea
Author: James Taylor 
Authored: Thu Jul 12 20:10:10 2018 -0700
Committer: James Taylor 
Committed: Thu Jul 12 20:10:10 2018 -0700

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 
 1 file changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0af8b1e3/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index f4e8896..5ed4130 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,7 +482,6 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
 }
-boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
@@ -493,8 +493,6 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
-// We must run a query if any index contains a non pk column
-noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
 }
@@ -514,7 +512,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
-noQueryReqd &= !hasPreOrPostProcessing;
+boolean noQueryReqd = !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row keys for the data
 // that is being upserted for conflict detection purposes.
@@ -553,8 +551,24 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
-
+
+// We need to have all indexed columns available in all immutable indexes in order
+// to generate the delete markers from the query. We also cannot have any filters
+// except for our SkipScanFilter for point lookups.
+// A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
+// may have been optimized out. Instead, we check that there's a single SkipScanFilter
+// If we can generate a plan for every index, that means all the required columns are available in every index,
+// hence we can drive the delete from any of the plans.
 noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
+int queryPlanIndex = 0;
+while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+QueryPlan plan = queryPlans.get(queryPlanIndex++);
+StatementContext context = plan.getContext();
+noQueryReqd &= (!context.getScan().hasFilter()
+