git commit: PHOENIX-910 Filter should override hasFilterRow() when filterRow() is overridden.(Ted)

2014-10-23 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/master fba06a80f -> 06a511cff


PHOENIX-910 Filter should override hasFilterRow() when filterRow() is 
overridden.(Ted)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/06a511cf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/06a511cf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/06a511cf

Branch: refs/heads/master
Commit: 06a511cffe2e04bf85db3d7fa6ade8327ee24cd6
Parents: fba06a8
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Fri Oct 24 10:54:12 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Fri Oct 24 10:54:12 2014 +0530

--
 .../org/apache/phoenix/filter/BooleanExpressionFilter.java | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/06a511cf/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
index e0caf9f..c5b36b2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.util.ServerUtil;
  * 
  * Base class for filter that evaluates a WHERE clause expression.
  *
+ * Subclass is expected to implement filterRow() method
  * 
  * @since 0.1
  */
@@ -57,6 +58,11 @@ abstract public class BooleanExpressionFilter extends 
FilterBase implements Writ
 }
 
 @Override
+public boolean hasFilterRow() {
+  return true;
+}
+
+@Override
 public int hashCode() {
 final int prime = 31;
 int result = 1;



git commit: PHOENIX-1245 Remove usage of empty KeyValue object BATCH_MARKER from Indexer.

2014-09-12 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/master 20c2d9000 -> 20e7559b4


PHOENIX-1245 Remove usage of empty KeyValue object BATCH_MARKER from Indexer.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/20e7559b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/20e7559b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/20e7559b

Branch: refs/heads/master
Commit: 20e7559b4406e8e0432d00348686cfd764e2d53e
Parents: 20c2d90
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Sat Sep 13 08:06:51 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Sat Sep 13 08:06:51 2014 +0530

--
 .../org/apache/phoenix/hbase/index/Indexer.java | 61 ++--
 1 file changed, 4 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/20e7559b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 975621c..c170c89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -106,14 +106,6 @@ public class Indexer extends BaseRegionObserver {
   private static final String INDEX_RECOVERY_FAILURE_POLICY_KEY = 
org.apache.hadoop.hbase.index.recovery.failurepolicy;
 
   /**
-   * Marker {@link KeyValue} to indicate that we are doing a batch operation. 
Needed because the
-   * coprocessor framework throws away the WALEdit from the prePut/preDelete 
hooks when checking a
-   * batch if there were no {@link KeyValue}s attached to the {@link WALEdit}. 
When you get down to
-   * the preBatch hook, there won't be any WALEdits to which to add the index 
updates.
-   */
-  private static KeyValue BATCH_MARKER = new KeyValue();
-
-  /**
* cache the failed updates to the various regions. Used for making the WAL 
recovery mechanisms
* more robust in the face of recoverying index regions that were on the 
same server as the
* primary table region
@@ -201,43 +193,6 @@ public class Indexer extends BaseRegionObserver {
   }
 
   @Override
-  public void prePut(final ObserverContextRegionCoprocessorEnvironment c, 
final Put put,
-  final WALEdit edit, final Durability durability) throws IOException {
-  if (this.disabled) {
-  super.prePut(c, put, edit, durability);
-  return;
-  }
-  preSingleUpdate(c, put, edit, durability);
-  }
-
-  @Override
-  public void preDelete(ObserverContextRegionCoprocessorEnvironment e, 
Delete delete,
-  WALEdit edit, final Durability durability) throws IOException {
-  if (this.disabled) {
-  super.preDelete(e, delete, edit, durability);
-  return;
-  }
-  preSingleUpdate(e, delete, edit, durability);
-  }
-
-  /**
-   * Process the prePut and preDelete methods. These need to be handled so the 
preBatchMutate method
-   * can function properly.
-   * p
-   * As of HBase 0.96, these can all go through the same mechanism as puts and 
deletes all go
-   * through the batchMutation mechanism in HRegion. Previously, {@link 
Delete} had a separate path,
-   * which caused some interesting problems for managing WALs, but see older 
versions of Phoenix for
-   * more information there.
-   */
-  @SuppressWarnings(javadoc)
-  public void preSingleUpdate(final 
ObserverContextRegionCoprocessorEnvironment c, final Mutation put,
-  final WALEdit edit, final Durability durability) throws IOException {
-  // just have to add a batch marker to the WALEdit so we get the edit 
again in the batch
-  // processing step. We let it throw an exception here because something 
terrible has happened.
-  edit.add(BATCH_MARKER);
-  }
-
-  @Override
   public void preBatchMutate(ObserverContextRegionCoprocessorEnvironment c,
   MiniBatchOperationInProgressMutation miniBatchOp) throws IOException {
   if (this.disabled) {
@@ -269,18 +224,6 @@ public class Indexer extends BaseRegionObserver {
 }
 Durability durability = Durability.SKIP_WAL;
 for (int i = 0; i < miniBatchOp.size(); i++) {
-  // remove the batch keyvalue marker - its added for all puts
-  WALEdit edit = miniBatchOp.getWalEdit(i);
-  // we don't have a WALEdit for immutable index cases, which still see 
this path
-  // we could check is indexing is enable for the mutation in prePut and 
then just skip this
-  // after checking here, but this saves us the checking again.
-  if (edit != null) {
-KeyValue kv = edit.getKeyValues().get(0);
-if (kv == BATCH_MARKER) {
-  // remove batch marker 

git commit: PHOENIX-1245 Remove usage of empty KeyValue object BATCH_MARKER from Indexer.

2014-09-12 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/4.0 ce1dd85a5 -> caa7848cf


PHOENIX-1245 Remove usage of empty KeyValue object BATCH_MARKER from Indexer.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/caa7848c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/caa7848c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/caa7848c

Branch: refs/heads/4.0
Commit: caa7848cfc89461545e3ab7f9ff902f474446b5b
Parents: ce1dd85
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Sat Sep 13 08:15:28 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Sat Sep 13 08:15:28 2014 +0530

--
 .../org/apache/phoenix/hbase/index/Indexer.java | 61 ++--
 1 file changed, 4 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/caa7848c/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 975621c..c170c89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -106,14 +106,6 @@ public class Indexer extends BaseRegionObserver {
   private static final String INDEX_RECOVERY_FAILURE_POLICY_KEY = 
org.apache.hadoop.hbase.index.recovery.failurepolicy;
 
   /**
-   * Marker {@link KeyValue} to indicate that we are doing a batch operation. 
Needed because the
-   * coprocessor framework throws away the WALEdit from the prePut/preDelete 
hooks when checking a
-   * batch if there were no {@link KeyValue}s attached to the {@link WALEdit}. 
When you get down to
-   * the preBatch hook, there won't be any WALEdits to which to add the index 
updates.
-   */
-  private static KeyValue BATCH_MARKER = new KeyValue();
-
-  /**
* cache the failed updates to the various regions. Used for making the WAL 
recovery mechanisms
* more robust in the face of recoverying index regions that were on the 
same server as the
* primary table region
@@ -201,43 +193,6 @@ public class Indexer extends BaseRegionObserver {
   }
 
   @Override
-  public void prePut(final ObserverContextRegionCoprocessorEnvironment c, 
final Put put,
-  final WALEdit edit, final Durability durability) throws IOException {
-  if (this.disabled) {
-  super.prePut(c, put, edit, durability);
-  return;
-  }
-  preSingleUpdate(c, put, edit, durability);
-  }
-
-  @Override
-  public void preDelete(ObserverContextRegionCoprocessorEnvironment e, 
Delete delete,
-  WALEdit edit, final Durability durability) throws IOException {
-  if (this.disabled) {
-  super.preDelete(e, delete, edit, durability);
-  return;
-  }
-  preSingleUpdate(e, delete, edit, durability);
-  }
-
-  /**
-   * Process the prePut and preDelete methods. These need to be handled so the 
preBatchMutate method
-   * can function properly.
-   * p
-   * As of HBase 0.96, these can all go through the same mechanism as puts and 
deletes all go
-   * through the batchMutation mechanism in HRegion. Previously, {@link 
Delete} had a separate path,
-   * which caused some interesting problems for managing WALs, but see older 
versions of Phoenix for
-   * more information there.
-   */
-  @SuppressWarnings(javadoc)
-  public void preSingleUpdate(final 
ObserverContextRegionCoprocessorEnvironment c, final Mutation put,
-  final WALEdit edit, final Durability durability) throws IOException {
-  // just have to add a batch marker to the WALEdit so we get the edit 
again in the batch
-  // processing step. We let it throw an exception here because something 
terrible has happened.
-  edit.add(BATCH_MARKER);
-  }
-
-  @Override
   public void preBatchMutate(ObserverContextRegionCoprocessorEnvironment c,
   MiniBatchOperationInProgressMutation miniBatchOp) throws IOException {
   if (this.disabled) {
@@ -269,18 +224,6 @@ public class Indexer extends BaseRegionObserver {
 }
 Durability durability = Durability.SKIP_WAL;
 for (int i = 0; i < miniBatchOp.size(); i++) {
-  // remove the batch keyvalue marker - its added for all puts
-  WALEdit edit = miniBatchOp.getWalEdit(i);
-  // we don't have a WALEdit for immutable index cases, which still see 
this path
-  // we could check is indexing is enable for the mutation in prePut and 
then just skip this
-  // after checking here, but this saves us the checking again.
-  if (edit != null) {
-KeyValue kv = edit.getKeyValues().get(0);
-if (kv == BATCH_MARKER) {
-  // remove batch marker from the 

git commit: PHOENIX-1203 Unable to work for count (distinct col) queries via phoenix table with secondary indexes.

2014-08-31 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/master 867af78dd -> 4774c6332


PHOENIX-1203 Unable to work for count (distinct col) queries via phoenix table 
with secondary indexes.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4774c633
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4774c633
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4774c633

Branch: refs/heads/master
Commit: 4774c63320517fed15c4f4e3ef0a03fdbc597b06
Parents: 867af78
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Mon Sep 1 09:13:38 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Mon Sep 1 09:13:38 2014 +0530

--
 .../apache/phoenix/end2end/DistinctCountIT.java | 45 
 .../apache/phoenix/parse/ParseNodeFactory.java  |  2 +-
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4774c633/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
index 62fa0f5..4b76d29 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
@@ -45,6 +45,7 @@ import java.sql.ResultSet;
 import java.sql.Types;
 import java.util.Properties;
 
+import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -458,4 +459,48 @@ public class DistinctCountIT extends 
BaseClientManagedTimeIT {
 conn.close();
 }
 }
+
+@Test
+public void testDistinctCountOnIndexTab() throws Exception {
+String ddl = create table personal_details (id integer not null, 
first_name char(15),\n
++ last_name char(15), CONSTRAINT pk PRIMARY KEY (id));
+Properties props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+try {
+PreparedStatement stmt = conn.prepareStatement(ddl);
+stmt.execute(ddl);
+conn.createStatement().execute(CREATE INDEX personal_details_idx 
ON personal_details(first_name));
+} catch (TableAlreadyExistsException e) {
+
+} finally {
+conn.close();
+}
+
+conn = DriverManager.getConnection(getUrl(), props);
+try {
+PreparedStatement stmt = conn.prepareStatement(upsert into 
personal_details(id, first_name, 
++ last_name) VALUES (?, ?, ?));
+stmt.setInt(1, 1);
+stmt.setString(2, NAME1);
+stmt.setString(3, LN);
+stmt.execute();
+stmt.setInt(1, 2);
+stmt.setString(2, NAME1);
+stmt.setString(3, LN2);
+stmt.execute();
+stmt.setInt(1, 3);
+stmt.setString(2, NAME2);
+stmt.setString(3, LN3);
+stmt.execute();
+conn.commit();
+
+String query = SELECT COUNT (DISTINCT first_name) FROM 
personal_details;
+PreparedStatement statement = conn.prepareStatement(query);
+ResultSet rs = statement.executeQuery();
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+} finally {
+conn.close();
+}
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4774c633/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 5125340..6872f8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -345,7 +345,7 @@ public class ParseNodeFactory {
 if 
(CountAggregateFunction.NAME.equals(SchemaUtil.normalizeIdentifier(name))) {
 BuiltInFunctionInfo info = getInfo(
 
SchemaUtil.normalizeIdentifier(DistinctCountAggregateFunction.NAME), args);
-return new DistinctCountParseNode(name, args, info);
+return new 
DistinctCountParseNode(DistinctCountAggregateFunction.NAME, args, info);
 } else {
 throw new UnsupportedOperationException(DISTINCT not supported 
with  + name);
 }



git commit: PHOENIX-1203 Unable to work for count (distinct col) queries via phoenix table with secondary indexes.

2014-08-31 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/4.0 6fb2b22b9 -> 214a4ccf8


PHOENIX-1203 Unable to work for count (distinct col) queries via phoenix table 
with secondary indexes.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/214a4ccf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/214a4ccf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/214a4ccf

Branch: refs/heads/4.0
Commit: 214a4ccf89da50c47158b71e9d17418a0c3e6fae
Parents: 6fb2b22
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Mon Sep 1 09:16:04 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Mon Sep 1 09:16:04 2014 +0530

--
 .../apache/phoenix/end2end/DistinctCountIT.java | 45 
 .../apache/phoenix/parse/ParseNodeFactory.java  |  2 +-
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/214a4ccf/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
index 62fa0f5..4b76d29 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
@@ -45,6 +45,7 @@ import java.sql.ResultSet;
 import java.sql.Types;
 import java.util.Properties;
 
+import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -458,4 +459,48 @@ public class DistinctCountIT extends 
BaseClientManagedTimeIT {
 conn.close();
 }
 }
+
+@Test
+public void testDistinctCountOnIndexTab() throws Exception {
+String ddl = create table personal_details (id integer not null, 
first_name char(15),\n
++ last_name char(15), CONSTRAINT pk PRIMARY KEY (id));
+Properties props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+try {
+PreparedStatement stmt = conn.prepareStatement(ddl);
+stmt.execute(ddl);
+conn.createStatement().execute(CREATE INDEX personal_details_idx 
ON personal_details(first_name));
+} catch (TableAlreadyExistsException e) {
+
+} finally {
+conn.close();
+}
+
+conn = DriverManager.getConnection(getUrl(), props);
+try {
+PreparedStatement stmt = conn.prepareStatement(upsert into 
personal_details(id, first_name, 
++ last_name) VALUES (?, ?, ?));
+stmt.setInt(1, 1);
+stmt.setString(2, NAME1);
+stmt.setString(3, LN);
+stmt.execute();
+stmt.setInt(1, 2);
+stmt.setString(2, NAME1);
+stmt.setString(3, LN2);
+stmt.execute();
+stmt.setInt(1, 3);
+stmt.setString(2, NAME2);
+stmt.setString(3, LN3);
+stmt.execute();
+conn.commit();
+
+String query = SELECT COUNT (DISTINCT first_name) FROM 
personal_details;
+PreparedStatement statement = conn.prepareStatement(query);
+ResultSet rs = statement.executeQuery();
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+} finally {
+conn.close();
+}
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/214a4ccf/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 5125340..6872f8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -345,7 +345,7 @@ public class ParseNodeFactory {
 if 
(CountAggregateFunction.NAME.equals(SchemaUtil.normalizeIdentifier(name))) {
 BuiltInFunctionInfo info = getInfo(
 
SchemaUtil.normalizeIdentifier(DistinctCountAggregateFunction.NAME), args);
-return new DistinctCountParseNode(name, args, info);
+return new 
DistinctCountParseNode(DistinctCountAggregateFunction.NAME, args, info);
 } else {
 throw new UnsupportedOperationException(DISTINCT not supported 
with  + name);
 }



git commit: PHOENIX-1102 Query Finds No Rows When Using Multiple Column Families in where clause. (Anoop)

2014-07-22 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/4.0 6e523cfa9 -> 3049fe222


PHOENIX-1102 Query Finds No Rows When Using Multiple Column Families in where 
clause. (Anoop)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3049fe22
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3049fe22
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3049fe22

Branch: refs/heads/4.0
Commit: 3049fe222796154ce71c3d79d0419874471d26f1
Parents: 6e523cf
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Tue Jul 22 22:50:28 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Tue Jul 22 22:50:28 2014 +0530

--
 .../end2end/ColumnProjectionOptimizationIT.java | 16 +
 .../MultiCFCQKeyValueComparisonFilter.java  | 19 +--
 .../filter/MultiCQKeyValueComparisonFilter.java | 19 ---
 .../filter/MultiKeyValueComparisonFilter.java   | 11 +++
 .../java/org/apache/phoenix/query/BaseTest.java | 34 ++--
 5 files changed, 66 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3049fe22/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index 9261125..0f8f54a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -37,6 +37,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -308,4 +309,19 @@ public class ColumnProjectionOptimizationIT extends 
BaseClientManagedTimeIT {
 admin.close();
 }
 }
+
+@Test
+public void testSelectWithConditionOnMultiCF() throws Exception {
+initMultiCFTable(getUrl());
+Connection conn = DriverManager.getConnection(getUrl());
+String query = SELECT c.db_cpu_utilization FROM MULTI_CF WHERE 
a.unique_user_count = ? and b.unique_org_count = ?;
+PreparedStatement statement = conn.prepareStatement(query);
+statement.setInt(1, 1);
+statement.setInt(2, 1);
+ResultSet rs = statement.executeQuery();
+boolean b = rs.next();
+assertTrue(b);
+assertEquals(BigDecimal.valueOf(40.1), rs.getBigDecimal(1));
+assertFalse(rs.next());
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3049fe22/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
index 71aff46..9147f1a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
@@ -18,12 +18,10 @@
 package org.apache.phoenix.filter;
 
 import java.io.IOException;
-import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
-
 import org.apache.phoenix.expression.Expression;
 
 
@@ -31,13 +29,12 @@ import org.apache.phoenix.expression.Expression;
  *
  * Filter that evaluates WHERE clause expression, used in the case where there
  * are references to multiple column qualifiers over multiple column families.
- *
+ * Also there can be same qualifier names in different families.
  * 
  * @since 0.1
  */
 public class MultiCFCQKeyValueComparisonFilter extends 
MultiKeyValueComparisonFilter {
 private final ImmutablePairBytesPtr ptr = new ImmutablePairBytesPtr();
-private TreeSetbyte[] cfSet;
 
 public MultiCFCQKeyValueComparisonFilter() {
 }
@@ -47,12 +44,6 @@ public class MultiCFCQKeyValueComparisonFilter extends 
MultiKeyValueComparisonFi
 }
 
 @Override
-protected void init() {
-cfSet = new TreeSetbyte[](Bytes.BYTES_COMPARATOR);
-super.init();
-}
-
-@Override
 protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength,
 byte[] cq, int cqOffset, int cqLength) {
 ptr.set(cf, cfOffset, cfLength, cq, cqOffset, cqLength);
@@ -125,14 +116,6 

git commit: PHOENIX-1102 Query Finds No Rows When Using Multiple Column Families in where clause. (Anoop)

2014-07-22 Thread anoopsamjohn
Repository: phoenix
Updated Branches:
  refs/heads/3.0 38de8fd56 -> 343d9262c


PHOENIX-1102 Query Finds No Rows When Using Multiple Column Families in where 
clause. (Anoop)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/343d9262
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/343d9262
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/343d9262

Branch: refs/heads/3.0
Commit: 343d9262cda3a10461bb301ee0089e6df3867d99
Parents: 38de8fd
Author: anoopsjohn anoopsamj...@gmail.com
Authored: Tue Jul 22 22:51:49 2014 +0530
Committer: anoopsjohn anoopsamj...@gmail.com
Committed: Tue Jul 22 22:51:49 2014 +0530

--
 .../end2end/ColumnProjectionOptimizationIT.java | 16 +++
 .../MultiCFCQKeyValueComparisonFilter.java  | 20 +
 .../filter/MultiCQKeyValueComparisonFilter.java | 19 -
 .../filter/MultiKeyValueComparisonFilter.java   | 11 +++
 .../java/org/apache/phoenix/query/BaseTest.java | 30 
 5 files changed, 64 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/343d9262/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index 6fe7aec..ccdc7b7 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -37,6 +37,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -307,4 +308,19 @@ public class ColumnProjectionOptimizationIT extends 
BaseClientManagedTimeIT {
 admin.close();
 }
 }
+
+@Test
+public void testSelectWithConditionOnMultiCF() throws Exception {
+initMultiCFTable(getUrl());
+Connection conn = DriverManager.getConnection(getUrl());
+String query = SELECT c.db_cpu_utilization FROM MULTI_CF WHERE 
a.unique_user_count = ? and b.unique_org_count = ?;
+PreparedStatement statement = conn.prepareStatement(query);
+statement.setInt(1, 1);
+statement.setInt(2, 1);
+ResultSet rs = statement.executeQuery();
+boolean b = rs.next();
+assertTrue(b);
+assertEquals(BigDecimal.valueOf(40.1), rs.getBigDecimal(1));
+assertFalse(rs.next());
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/343d9262/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
index 4f0e0eb..0b158c7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
@@ -17,10 +17,7 @@
  */
 package org.apache.phoenix.filter;
 
-import java.util.TreeSet;
-
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.apache.phoenix.expression.Expression;
 
 
@@ -28,13 +25,12 @@ import org.apache.phoenix.expression.Expression;
  *
  * Filter that evaluates WHERE clause expression, used in the case where there
  * are references to multiple column qualifiers over multiple column families.
- *
+ * Also there can be same qualifier names in different families.
  * 
  * @since 0.1
  */
 public class MultiCFCQKeyValueComparisonFilter extends 
MultiKeyValueComparisonFilter {
 private final ImmutablePairBytesPtr ptr = new ImmutablePairBytesPtr();
-private TreeSetbyte[] cfSet;
 
 public MultiCFCQKeyValueComparisonFilter() {
 }
@@ -44,12 +40,6 @@ public class MultiCFCQKeyValueComparisonFilter extends 
MultiKeyValueComparisonFi
 }
 
 @Override
-protected void init() {
-cfSet = new TreeSetbyte[](Bytes.BYTES_COMPARATOR);
-super.init();
-}
-
-@Override
 protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength,
 byte[] cq, int cqOffset, int cqLength) {
 ptr.set(cf, cfOffset, cfLength, cq, cqOffset, cqLength);
@@ -121,12 +111,4 @@ public class MultiCFCQKeyValueComparisonFilter extends 
MultiKeyValueComparisonFi
 return true;
 }
 }
-
-
-