git commit: Tweak SkipScanAfterManualSplitIT to only split at region boundaries

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 7cc2b5a54 -> 40c2288f0


Tweak SkipScanAfterManualSplitIT to only split at region boundaries


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/40c2288f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/40c2288f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/40c2288f

Branch: refs/heads/3.0
Commit: 40c2288f0f15ed5d06a6a5cd8b0cd3171bb766b2
Parents: 7cc2b5a
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 09:49:21 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 09:49:21 2014 -0700

--
 .../end2end/SkipScanAfterManualSplitIT.java | 59 +++-
 1 file changed, 58 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/40c2288f/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index 4fdf4c5..71fa620 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -135,7 +135,7 @@ public class SkipScanAfterManualSplitIT extends BaseHBaseManagedTimeIT {
         assertEquals(nRegions, nInitialRegions);
 
         int nRows = 2;
-        String query = "SELECT count(*) FROM S WHERE a IN ('tl','jt')";
+        String query = "SELECT /*+ NO_INTRA_REGION_PARALLELIZATION */ count(*) FROM S WHERE a IN ('tl','jt')";
         ResultSet rs1 = conn.createStatement().executeQuery(query);
         assertTrue(rs1.next());
         traceRegionBoundaries(services);
@@ -155,4 +155,61 @@ public class SkipScanAfterManualSplitIT extends BaseHBaseManagedTimeIT {
         }
 
     }
+
+    /* HBase-level repro of above issue. I believe the two scans need
+     * to be issued in parallel to repro (that's the only difference
+     * with the above tests).
+    @Test
+    public void testReproSplitBugAtHBaseLevel() throws Exception {
+        initTable();
+        Connection conn = DriverManager.getConnection(getUrl());
+        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+        traceRegionBoundaries(services);
+        int nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
+        int nInitialRegions = nRegions;
+        HBaseAdmin admin = services.getAdmin();
+        try {
+            admin.split(TABLE_NAME);
+            int nTries = 0;
+            while (nRegions == nInitialRegions && nTries < 10) {
+                Thread.sleep(1000);
+                nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
+                nTries++;
+            }
+            // Split finished by this time, but cache isn't updated until
+            // table is accessed
+            assertEquals(nRegions, nInitialRegions);
+
+            String query = "SELECT count(*) FROM S WHERE a IN ('tl','jt')";
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            HTableInterface table = services.getTable(TABLE_NAME_BYTES);
+            Filter filter = plan.getContext().getScanRanges().getSkipScanFilter();
+            Scan scan = new Scan();
+            ResultScanner scanner;
+            int count = 0;
+            scan.setFilter(filter);
+
+            scan.setStartRow(new byte[] {1, 't', 'l'});
+            scan.setStopRow(new byte[] {1, 't', 'l'});
+            scanner = table.getScanner(scan);
+            count = 0;
+            while (scanner.next() != null) {
+                count++;
+            }
+            assertEquals(1, count);
+
+            scan.setStartRow(new byte[] {3});
+            scan.setStopRow(new byte[] {4});
+            scanner = table.getScanner(scan);
+            count = 0;
+            while (scanner.next() != null) {
+                count++;
+            }
+            assertEquals(1, count);
+        } finally {
+            admin.close();
+        }
+    }
+    */
+
 }
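
The disabled repro above runs its two scans one after the other, while its leading comment suspects the bug only shows up when the scans run in parallel. Below is a minimal sketch of how the two scans could be issued concurrently; it assumes the same table handle, skip-scan filter, and key ranges as the commented-out test, and is an illustration rather than part of the commit. Note that a single HTableInterface is not thread-safe (nor is sharing one stateful Filter instance), so a faithful parallel repro would likely need one table handle and filter copy per thread.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;

public class ParallelScanSketch {
    // Runs one scan per (startRow, stopRow) pair on its own thread and
    // returns the row count each scan observed.
    static List<Integer> scanInParallel(final HTableInterface table,
            final Filter filter, byte[][]... ranges) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(ranges.length);
        try {
            List<Future<Integer>> futures = new ArrayList<Future<Integer>>();
            for (final byte[][] range : ranges) {
                futures.add(pool.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        Scan scan = new Scan();
                        scan.setFilter(filter); // simplification: real code would copy the filter per scan
                        scan.setStartRow(range[0]);
                        scan.setStopRow(range[1]);
                        ResultScanner scanner = table.getScanner(scan);
                        try {
                            int count = 0;
                            while (scanner.next() != null) {
                                count++;
                            }
                            return count;
                        } finally {
                            scanner.close();
                        }
                    }
                }));
            }
            List<Integer> counts = new ArrayList<Integer>();
            for (Future<Integer> f : futures) {
                counts.add(f.get()); // propagates any scan failure
            }
            return counts;
        } finally {
            pool.shutdown();
        }
    }
}

For the two ranges in the test this would be invoked as scanInParallel(table, filter, new byte[][] { {1, 't', 'l'}, {1, 't', 'l'} }, new byte[][] { {3}, {4} }).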



Jenkins build is back to normal : Phoenix | 3.0 | Hadoop1 #165

2014-07-29 Thread Apache Jenkins Server
See https://builds.apache.org/job/Phoenix-3.0-hadoop1/165/changes



git commit: PHOENIX-1128 Fix build errors introduced by switch to hadoop2 as default profile (Gabriel Reid)

2014-07-29 Thread mujtaba
Repository: phoenix
Updated Branches:
  refs/heads/4.0 aba5ea906 -> 632624e08


PHOENIX-1128 Fix build errors introduced by switch to hadoop2 as default profile (Gabriel Reid)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/632624e0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/632624e0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/632624e0

Branch: refs/heads/4.0
Commit: 632624e0821814da8d61ab9a3b38ea2ae5bab946
Parents: aba5ea9
Author: Mujtaba mujt...@apache.org
Authored: Tue Jul 29 12:11:26 2014 -0700
Committer: Mujtaba mujt...@apache.org
Committed: Tue Jul 29 12:11:26 2014 -0700

--
 phoenix-core/pom.xml  | 13 +++--
 phoenix-flume/pom.xml |  4 +++-
 phoenix-pig/pom.xml   | 14 ++
 pom.xml   | 30 --
 4 files changed, 36 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/632624e0/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index af6dcb6..48e7fc2 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -303,13 +303,14 @@
   </dependencies>
 
   <profiles>
-    <!-- Profile for building against Hadoop 1. Activate using: mvn -Dhadoop.profile=1-->
+
+    <!-- Profile for building against Hadoop 1.  Active by default. Not used if another
+      Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>
       <id>hadoop-1</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>1</value>
+          <name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -387,13 +388,13 @@
       </dependencies>
     </profile>
 
-    <!-- Profile for building against Hadoop 2.  Active by default. Not used if another
-      Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
+    <!-- Profile for building against Hadoop 2. Activate using: mvn -Dhadoop.profile=2-->
     <profile>
       <id>hadoop-2</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+          <name>hadoop.profile</name>
+          <value>2</value>
         </property>
       </activation>
       <dependencies>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/632624e0/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index ea368c7..75aa51d 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -32,7 +32,7 @@
   <name>Phoenix - Flume</name>
 
   <dependencies>
-    <dependency>
+   <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
     </dependency>
@@ -46,6 +46,7 @@
       <groupId>org.apache.flume</groupId>
       <artifactId>flume-ng-core</artifactId>
     </dependency>
+
     <!-- Test Dependencies -->
     <dependency>
       <groupId>junit</groupId>
@@ -97,6 +98,7 @@
   </build>
   
   <profiles>
+
     <!-- Profile for building against Hadoop 1. Active by default. Not used if another 
       Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/632624e0/phoenix-pig/pom.xml
--
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 07a76a0..a08453c 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -43,10 +43,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.pig</groupId>
-      <artifactId>pig</artifactId>
-    </dependency>
-    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>
@@ -75,6 +71,7 @@
   </build>
 
   <profiles>
+
     <!-- Profile for building against Hadoop 1. Active by default. Not used if another 
       Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>
@@ -86,6 +83,10 @@
       </activation>
       <dependencies>
         <dependency>
+          <groupId>org.apache.pig</groupId>
+          <artifactId>pig</artifactId>
+        </dependency>
+        <dependency>
           <groupId>org.apache.hbase</groupId>
           <artifactId>hbase-testing-util</artifactId>
           <version>${hbase-hadoop1.version}</version>
@@ -172,6 +173,11 @@
       </activation>
       <dependencies>
         <dependency>
+          <groupId>org.apache.pig</groupId>
+          <artifactId>pig</artifactId>
+          <classifier>h2</classifier>
+        </dependency>
+        <dependency>
           <groupId>org.apache.hbase</groupId>
           <artifactId>hbase-testing-util</artifactId>
           <version>${hbase-hadoop2.version}</version>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/632624e0/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1f63680..77765cc 

git commit: PHOENIX-1128 Fix build errors introduced by switch to hadoop2 as default profile (Gabriel Reid)

2014-07-29 Thread mujtaba
Repository: phoenix
Updated Branches:
  refs/heads/master bc384e7e0 -> 7d2608d22


PHOENIX-1128 Fix build errors introduced by switch to hadoop2 as default profile (Gabriel Reid)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7d2608d2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7d2608d2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7d2608d2

Branch: refs/heads/master
Commit: 7d2608d227f3fca1032199dda302da9904ed091d
Parents: bc384e7
Author: Mujtaba mujt...@apache.org
Authored: Tue Jul 29 12:13:25 2014 -0700
Committer: Mujtaba mujt...@apache.org
Committed: Tue Jul 29 12:13:25 2014 -0700

--
 phoenix-core/pom.xml  | 13 +++--
 phoenix-flume/pom.xml |  4 +++-
 phoenix-pig/pom.xml   | 14 ++
 pom.xml   | 30 --
 4 files changed, 36 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2608d2/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index cfdee95..46125b6 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -303,13 +303,14 @@
   </dependencies>
 
   <profiles>
-    <!-- Profile for building against Hadoop 1. Activate using: mvn -Dhadoop.profile=1-->
+
+    <!-- Profile for building against Hadoop 1.  Active by default. Not used if another
+      Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>
       <id>hadoop-1</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>1</value>
+          <name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -387,13 +388,13 @@
       </dependencies>
     </profile>
 
-    <!-- Profile for building against Hadoop 2.  Active by default. Not used if another
-      Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
+    <!-- Profile for building against Hadoop 2. Activate using: mvn -Dhadoop.profile=2-->
     <profile>
       <id>hadoop-2</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+          <name>hadoop.profile</name>
+          <value>2</value>
         </property>
       </activation>
       <dependencies>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2608d2/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 5e9fd2f..d1bf843 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -32,7 +32,7 @@
   <name>Phoenix - Flume</name>
 
   <dependencies>
-    <dependency>
+   <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
     </dependency>
@@ -46,6 +46,7 @@
       <groupId>org.apache.flume</groupId>
       <artifactId>flume-ng-core</artifactId>
     </dependency>
+
     <!-- Test Dependencies -->
     <dependency>
       <groupId>junit</groupId>
@@ -97,6 +98,7 @@
   </build>
   
   <profiles>
+
     <!-- Profile for building against Hadoop 1. Active by default. Not used if another 
       Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2608d2/phoenix-pig/pom.xml
--
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 53faa09..f1bb637 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -43,10 +43,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.pig</groupId>
-      <artifactId>pig</artifactId>
-    </dependency>
-    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>
@@ -75,6 +71,7 @@
   </build>
 
   <profiles>
+
     <!-- Profile for building against Hadoop 1. Active by default. Not used if another 
       Hadoop profile is specified with mvn -Dhadoop.profile=foo -->
     <profile>
@@ -86,6 +83,10 @@
       </activation>
       <dependencies>
         <dependency>
+          <groupId>org.apache.pig</groupId>
+          <artifactId>pig</artifactId>
+        </dependency>
+        <dependency>
           <groupId>org.apache.hbase</groupId>
           <artifactId>hbase-testing-util</artifactId>
           <version>${hbase-hadoop1.version}</version>
@@ -172,6 +173,11 @@
       </activation>
       <dependencies>
         <dependency>
+          <groupId>org.apache.pig</groupId>
+          <artifactId>pig</artifactId>
+          <classifier>h2</classifier>
+        </dependency>
+        <dependency>
           <groupId>org.apache.hbase</groupId>
           <artifactId>hbase-testing-util</artifactId>
           <version>${hbase-hadoop2.version}</version>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2608d2/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 

git commit: Don't offset row key for local indexes on merge sort

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 632624e08 -> b61d182f4


Don't offset row key for local indexes on merge sort


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b61d182f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b61d182f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b61d182f

Branch: refs/heads/4.0
Commit: b61d182f44d88b2373aec92e7800fa770f5e2d73
Parents: 632624e
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 13:39:30 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 13:39:30 2014 -0700

--
 .../main/java/org/apache/phoenix/execute/ScanPlan.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b61d182f/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index f9af543..6ff5950 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -18,6 +18,9 @@
 package org.apache.phoenix.execute;
 
 
+import java.sql.SQLException;
+import java.util.List;
+
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.compile.RowProjector;
@@ -40,14 +43,11 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.TableRef;
-import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.ScanUtil;
 
-import java.sql.SQLException;
-import java.util.List;
-
 
 
 /**
@@ -122,7 +122,7 @@ public class ScanPlan extends BasicQueryPlan {
                 QueryServicesOptions.DEFAULT_ROW_KEY_ORDER_SALTED_TABLE) ||
              orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY ||
              orderBy == OrderBy.REV_ROW_KEY_ORDER_BY)) { // ORDER BY was optimized out b/c query is in row key order
-            scanner = new MergeSortRowKeyResultIterator(iterators, SaltingUtil.NUM_SALTING_BYTES, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
+            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
         } else {
             scanner = new ConcatResultIterator(iterators);
         }
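
For context on the one-line change above: MergeSortRowKeyResultIterator merge-sorts the per-region iterators by comparing row keys starting at a fixed byte offset. A salted table needs its single salt byte skipped, but a local index scan reaches this code path unsalted, so an unconditional offset of SaltingUtil.NUM_SALTING_BYTES would compare keys with their first real byte chopped off. A standalone toy comparator illustrating the offset idea (an illustration only, not Phoenix's actual iterator):

import java.util.Comparator;

// Compares row keys lexicographically, ignoring 'offset' leading bytes --
// one salt byte for salted tables, nothing for unsalted (e.g. local index) scans.
final class RowKeyOffsetComparator implements Comparator<byte[]> {
    private final int offset;

    RowKeyOffsetComparator(boolean isSalted) {
        this.offset = isSalted ? 1 : 0; // mirrors isSalted ? NUM_SALTING_BYTES : 0
    }

    @Override
    public int compare(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = offset; i < len; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff); // unsigned byte order, as HBase sorts
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    }
}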



Apache-Phoenix | Master | Hadoop1 | Build Successful

2014-07-29 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] Don't offset row key for local indexes on merge sort



git commit: PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 40c2288f0 -> 9ba71569b


PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9ba71569
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9ba71569
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9ba71569

Branch: refs/heads/3.0
Commit: 9ba71569befcc164e6737efff14bbdcbff166a57
Parents: 40c2288
Author: James Taylor jamestay...@apache.org
Authored: Tue Jul 29 17:15:37 2014 -0700
Committer: James Taylor jamestay...@apache.org
Committed: Tue Jul 29 17:15:37 2014 -0700

--
 .../src/main/java/org/apache/phoenix/filter/SkipScanFilter.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ba71569/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index 5a66e9c..d65e09c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -210,6 +210,8 @@ public class SkipScanFilter extends FilterBase {
             if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length-1)) {
                 return false;
             }
+        } else if (filterAllRemaining()) {
+            return true;
         }
         // Copy inclusive all positions 
         for (int i = 0; i <= lastSlot; i++) {



git commit: PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 b61d182f4 -> f7f470528


PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f7f47052
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f7f47052
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f7f47052

Branch: refs/heads/4.0
Commit: f7f47052866a186ce316b317dbbdccb03170b0ac
Parents: b61d182
Author: James Taylor jamestay...@apache.org
Authored: Tue Jul 29 17:15:37 2014 -0700
Committer: James Taylor jamestay...@apache.org
Committed: Tue Jul 29 17:38:58 2014 -0700

--
 .../src/main/java/org/apache/phoenix/filter/SkipScanFilter.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f7f47052/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index a6b8161..5d23376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -219,6 +219,8 @@ public class SkipScanFilter extends FilterBase implements Writable {
             if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length-1)) {
                 return false;
             }
+        } else if (filterAllRemaining()) {
+            return true;
         }
         // Copy inclusive all positions 
         for (int i = 0; i <= lastSlot; i++) {



git commit: PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master f4b05aba9 -> e7868db3d


PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e7868db3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e7868db3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e7868db3

Branch: refs/heads/master
Commit: e7868db3d7c05cff013397024655640c64d1d4d1
Parents: f4b05ab
Author: James Taylor jamestay...@apache.org
Authored: Tue Jul 29 17:15:37 2014 -0700
Committer: James Taylor jamestay...@apache.org
Committed: Tue Jul 29 17:41:56 2014 -0700

--
 .../src/main/java/org/apache/phoenix/filter/SkipScanFilter.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e7868db3/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index a6b8161..5d23376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -219,6 +219,8 @@ public class SkipScanFilter extends FilterBase implements Writable {
             if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length-1)) {
                 return false;
             }
+        } else if (filterAllRemaining()) {
+            return true;
         }
         // Copy inclusive all positions 
         for (int i = 0; i <= lastSlot; i++) {



Apache-Phoenix | Master | Hadoop1 | Build Successful

2014-07-29 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master-hadoop1/lastCompletedBuild/testReport/

Changes
[jamestaylor] PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)



Apache-Phoenix | 3.0 | Hadoop1 | Build Successful

2014-07-29 Thread Apache Jenkins Server
3.0 branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastCompletedBuild/testReport/

Changes
[jamestaylor] PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)



git commit: PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 f7f470528 -> 6a6a9c0f2


PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6a6a9c0f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6a6a9c0f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6a6a9c0f

Branch: refs/heads/4.0
Commit: 6a6a9c0f20118a57f9e1f43a16e63f9eeaeb3d57
Parents: f7f4705
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 22:11:45 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 22:13:24 2014 -0700

--
 .../apache/phoenix/end2end/SkipScanQueryIT.java | 24 +++-
 .../apache/phoenix/filter/SkipScanFilter.java   |  7 +-
 2 files changed, 29 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a6a9c0f/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index 540197c..db5d15b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -274,4 +274,26 @@ public class SkipScanQueryIT extends BaseHBaseManagedTimeIT {
         }
     }
 
-}
+    @Test
+    public void testSkipScanIntersectionAtEnd() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        PreparedStatement stmt = conn.prepareStatement("create table splits_test "
+            + "(pk1 UNSIGNED_TINYINT NOT NULL, pk2 UNSIGNED_TINYINT NOT NULL, pk3 UNSIGNED_TINYINT NOT NULL, kv VARCHAR "
+            + "CONSTRAINT pk PRIMARY KEY (pk1, pk2, pk3)) SPLIT ON (?, ?, ?)");
+        stmt.setBytes(1, new byte[] {1, 1});
+        stmt.setBytes(2, new byte[] {2, 1});
+        stmt.setBytes(3, new byte[] {3, 1});
+        stmt.execute();
+
+        conn.createStatement().execute("upsert into splits_test values (0, 1, 1, 'a')");
+        conn.createStatement().execute("upsert into splits_test values (1, 1, 1, 'a')");
+        conn.createStatement().execute("upsert into splits_test values (2, 1, 1, 'a')");
+        conn.createStatement().execute("upsert into splits_test values (3, 1, 1, 'a')");
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("select count(kv) from splits_test where pk1 in (0, 1, 2, 3) AND pk2 = 1");
+        assertTrue(rs.next());
+        assertEquals(4, rs.getInt(1));
+        assertFalse(rs.next());
+    }}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a6a9c0f/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index 5d23376..13113c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -220,7 +220,12 @@ public class SkipScanFilter extends FilterBase implements Writable {
                 return false;
             }
         } else if (filterAllRemaining()) {
-            return true;
+            // We wrapped around the position array. We know there's an intersection, but it can only be at the last
+            // slot position. So reset the position array here to the last position index for each slot. This will
+            // be used below as the end bounds to formulate the list of intersecting slots.
+            for (int i = 0; i <= lastSlot; i++) {
+                position[i] = slots.get(i).size() - 1;
+            }
         }
         // Copy inclusive all positions 
         for (int i = 0; i <= lastSlot; i++) {
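
To make the new branch concrete: the skip scan keeps one list of key ranges per primary key slot plus a cursor into each list (the position array), and intersect walks those cursors. A simplified standalone sketch of the reset the comment describes, with List<String> standing in for Phoenix's per-slot list of KeyRange objects:

import java.util.Arrays;
import java.util.List;

class PositionResetSketch {
    // When the intersect walk wraps past the end of the position array, the only
    // possible intersection is each slot's last range, so point every cursor there.
    static void resetToLastPositions(int[] position, List<List<String>> slots, int lastSlot) {
        for (int i = 0; i <= lastSlot; i++) {
            position[i] = slots.get(i).size() - 1;
        }
    }

    public static void main(String[] args) {
        int[] position = new int[] {0, 0};
        List<List<String>> slots = Arrays.asList(
                Arrays.asList("[0-1]", "[2-3]"),  // two ranges for slot 0
                Arrays.asList("[1-1]"));          // one range for slot 1
        resetToLastPositions(position, slots, 1);
        System.out.println(Arrays.toString(position)); // prints [1, 0]
    }
}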



Apache-Phoenix | 4.0 | Hadoop1 | Build Successful

2014-07-29 Thread Apache Jenkins Server
4.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-1130 SkipScanFilter gets IndexOutOfBoundsException when intersecting salted tables (Kyle Buzsaki)



git commit: PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 3d4ecad36 -> 05b1ff4c6


PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/05b1ff4c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/05b1ff4c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/05b1ff4c

Branch: refs/heads/3.0
Commit: 05b1ff4c622041d0fa3741c298938945213a9bfa
Parents: 3d4ecad
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 22:35:44 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 22:35:44 2014 -0700

--
 .../phoenix/end2end/PhoenixEncodeDecodeIT.java  | 28 
 .../org/apache/phoenix/util/PhoenixRuntime.java |  5 +++-
 2 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/05b1ff4c/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
index fc01730..bdb0745 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
@@ -177,6 +177,34 @@ public class PhoenixEncodeDecodeIT extends BaseHBaseManagedTimeIT {
 
         assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
     }
+
+    @Test
+    public void testEncodeDecodePaddingPks() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute(
+                "CREATE TABLE T(pk1 CHAR(15) not null, pk2 CHAR(15) not null, v1 DATE " +
+                "CONSTRAINT pk PRIMARY KEY (pk1, pk2))");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO T (pk1, pk2, v1) VALUES (?, ?, ?)");
+        stmt.setString(1, " def");
+        stmt.setString(2, " eid");
+        stmt.setDate(3, new Date(100));
+        stmt.executeUpdate();
+        conn.commit();
+
+        stmt = conn.prepareStatement("SELECT pk1, pk2 FROM T");
+
+        Object[] retrievedValues = new Object[2];
+        ResultSet rs = stmt.executeQuery();
+        rs.next();
+        retrievedValues[0] = rs.getString(1);
+        retrievedValues[1] = rs.getString(2);
+
+        byte[] value = PhoenixRuntime.encodePK(conn, "T", retrievedValues);
+        Object[] decodedValues = PhoenixRuntime.decodePK(conn, "T", value);
+
+        assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
+    }
 
     private static Connection getTenantSpecificConnection() throws Exception {
         Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/05b1ff4c/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 06bdf5a..fdcc02a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -414,7 +414,10 @@ public class PhoenixRuntime {
                 output.write(QueryConstants.SEPARATOR_BYTE);
             }
             type = pkColumns.get(i).getDataType();
-            byte[] value = type.toBytes(values[i - offset]);
+
+            // for fixed width data types like CHAR and BINARY, we need to pad values to be of max length.
+            Object paddedObj = type.pad(values[i - offset], pkColumns.get(i).getMaxLength());
+            byte[] value = type.toBytes(paddedObj);
             output.write(value);
         }
         return output.toByteArray();
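
The padding matters because a fixed-width CHAR(n) column is stored padded to its full declared width in the row key, so encoding the raw, unpadded string would produce a shorter key than the one actually stored. A self-contained sketch of space padding for a CHAR value (a hypothetical helper for illustration, not Phoenix's PDataType.pad):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class CharPadSketch {
    // Right-pads a CHAR(maxLength) value with spaces before byte encoding,
    // matching how a fixed-width value occupies its slot in the row key.
    static byte[] encodeChar(String value, int maxLength) {
        byte[] raw = value.getBytes(StandardCharsets.UTF_8);
        if (raw.length >= maxLength) {
            return raw;
        }
        byte[] padded = Arrays.copyOf(raw, maxLength);           // zero-filled tail
        Arrays.fill(padded, raw.length, maxLength, (byte) ' ');  // replace zeros with spaces
        return padded;
    }

    public static void main(String[] args) {
        // " def" in a CHAR(15) primary key column occupies all 15 bytes of its slot.
        System.out.println(encodeChar(" def", 15).length); // prints 15
    }
}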



git commit: PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 6a6a9c0f2 -> 85d91bde7


PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/85d91bde
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/85d91bde
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/85d91bde

Branch: refs/heads/4.0
Commit: 85d91bde7f2ffe39774e484ab41f64513789077c
Parents: 6a6a9c0
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 22:35:44 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 22:36:38 2014 -0700

--
 .../phoenix/end2end/PhoenixEncodeDecodeIT.java  | 28 
 .../org/apache/phoenix/util/PhoenixRuntime.java |  5 +++-
 2 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/85d91bde/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
index fc01730..bdb0745 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
@@ -177,6 +177,34 @@ public class PhoenixEncodeDecodeIT extends BaseHBaseManagedTimeIT {
 
         assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
     }
+
+    @Test
+    public void testEncodeDecodePaddingPks() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute(
+                "CREATE TABLE T(pk1 CHAR(15) not null, pk2 CHAR(15) not null, v1 DATE " +
+                "CONSTRAINT pk PRIMARY KEY (pk1, pk2))");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO T (pk1, pk2, v1) VALUES (?, ?, ?)");
+        stmt.setString(1, " def");
+        stmt.setString(2, " eid");
+        stmt.setDate(3, new Date(100));
+        stmt.executeUpdate();
+        conn.commit();
+
+        stmt = conn.prepareStatement("SELECT pk1, pk2 FROM T");
+
+        Object[] retrievedValues = new Object[2];
+        ResultSet rs = stmt.executeQuery();
+        rs.next();
+        retrievedValues[0] = rs.getString(1);
+        retrievedValues[1] = rs.getString(2);
+
+        byte[] value = PhoenixRuntime.encodePK(conn, "T", retrievedValues);
+        Object[] decodedValues = PhoenixRuntime.decodePK(conn, "T", value);
+
+        assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
+    }
 
     private static Connection getTenantSpecificConnection() throws Exception {
         Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/85d91bde/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index b73185f..0f5e863 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -417,7 +417,10 @@ public class PhoenixRuntime {
                 output.write(QueryConstants.SEPARATOR_BYTE);
             }
             type = pkColumns.get(i).getDataType();
-            byte[] value = type.toBytes(values[i - offset]);
+
+            // for fixed width data types like CHAR and BINARY, we need to pad values to be of max length.
+            Object paddedObj = type.pad(values[i - offset], pkColumns.get(i).getMaxLength());
+            byte[] value = type.toBytes(paddedObj);
             output.write(value);
         }
         return output.toByteArray();



git commit: PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)

2014-07-29 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master 382f901a6 -> eeac05afa


PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eeac05af
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eeac05af
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eeac05af

Branch: refs/heads/master
Commit: eeac05afa257a7e733298a5680efede4abcd5355
Parents: 382f901
Author: James Taylor jtay...@salesforce.com
Authored: Tue Jul 29 22:35:44 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Tue Jul 29 22:38:34 2014 -0700

--
 .../phoenix/end2end/PhoenixEncodeDecodeIT.java  | 28 
 .../org/apache/phoenix/util/PhoenixRuntime.java |  5 +++-
 2 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eeac05af/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
index fc01730..bdb0745 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixEncodeDecodeIT.java
@@ -177,6 +177,34 @@ public class PhoenixEncodeDecodeIT extends BaseHBaseManagedTimeIT {
 
         assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
     }
+
+    @Test
+    public void testEncodeDecodePaddingPks() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute(
+                "CREATE TABLE T(pk1 CHAR(15) not null, pk2 CHAR(15) not null, v1 DATE " +
+                "CONSTRAINT pk PRIMARY KEY (pk1, pk2))");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO T (pk1, pk2, v1) VALUES (?, ?, ?)");
+        stmt.setString(1, " def");
+        stmt.setString(2, " eid");
+        stmt.setDate(3, new Date(100));
+        stmt.executeUpdate();
+        conn.commit();
+
+        stmt = conn.prepareStatement("SELECT pk1, pk2 FROM T");
+
+        Object[] retrievedValues = new Object[2];
+        ResultSet rs = stmt.executeQuery();
+        rs.next();
+        retrievedValues[0] = rs.getString(1);
+        retrievedValues[1] = rs.getString(2);
+
+        byte[] value = PhoenixRuntime.encodePK(conn, "T", retrievedValues);
+        Object[] decodedValues = PhoenixRuntime.decodePK(conn, "T", value);
+
+        assertEquals(Arrays.asList(decodedValues), Arrays.asList(retrievedValues));
+    }
 
     private static Connection getTenantSpecificConnection() throws Exception {
         Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eeac05af/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index a52cd95..af4c9e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -415,7 +415,10 @@ public class PhoenixRuntime {
                 output.write(QueryConstants.SEPARATOR_BYTE);
             }
             type = pkColumns.get(i).getDataType();
-            byte[] value = type.toBytes(values[i - offset]);
+
+            // for fixed width data types like CHAR and BINARY, we need to pad values to be of max length.
+            Object paddedObj = type.pad(values[i - offset], pkColumns.get(i).getMaxLength());
+            byte[] value = type.toBytes(paddedObj);
             output.write(value);
         }
         return output.toByteArray();



Build failed in Jenkins: Phoenix | 4.0 | Hadoop1 #240

2014-07-29 Thread Apache Jenkins Server
See https://builds.apache.org/job/Phoenix-4.0-hadoop1/240/changes

Changes:

[jtaylor] PHOENIX-1131 PhoenixRuntime.encodePk needs to pad row key values to max column length (Samarth Jain)

--
[...truncated 282 lines...]
Running org.apache.phoenix.trace.TraceReaderTest
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.088 sec - in org.apache.phoenix.query.ConnectionlessTest
Running org.apache.phoenix.index.IndexMaintainerTest
Tests run: 23, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.454 sec - in org.apache.phoenix.index.IndexMaintainerTest
Running org.apache.phoenix.filter.SkipScanFilterTest
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.061 sec - in org.apache.phoenix.filter.SkipScanFilterTest
Running org.apache.phoenix.filter.SkipScanFilterIntersectTest
Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.023 sec - in org.apache.phoenix.filter.SkipScanFilterIntersectTest
Running org.apache.phoenix.iterate.AggregateResultScannerTest
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.101 sec - in org.apache.phoenix.iterate.AggregateResultScannerTest
Running org.apache.phoenix.iterate.ConcatResultIteratorTest
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.002 sec - in org.apache.phoenix.iterate.ConcatResultIteratorTest
Running org.apache.phoenix.iterate.SpoolingResultIteratorTest
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.091 sec - in org.apache.phoenix.iterate.SpoolingResultIteratorTest
Running org.apache.phoenix.iterate.MergeSortResultIteratorTest
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.002 sec - in org.apache.phoenix.iterate.MergeSortResultIteratorTest
Running org.apache.phoenix.arithmetic.ArithmeticOperationTest
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.008 sec - in org.apache.phoenix.arithmetic.ArithmeticOperationTest
Running org.apache.phoenix.schema.RowKeyValueAccessorTest
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.079 sec - in org.apache.phoenix.schema.RowKeyValueAccessorTest
Running org.apache.phoenix.schema.SchemaUtilTest
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.002 sec - in org.apache.phoenix.schema.SchemaUtilTest
Running org.apache.phoenix.schema.PDataTypeTest
Tests run: 23, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.023 sec - in org.apache.phoenix.schema.PDataTypeTest
Running org.apache.phoenix.schema.SortOrderTest
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.004 sec - in org.apache.phoenix.schema.SortOrderTest
Running org.apache.phoenix.schema.ValueBitSetTest
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.005 sec - in org.apache.phoenix.schema.ValueBitSetTest
Running org.apache.phoenix.schema.PDataTypeForArraysTest
Tests run: 60, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.042 sec - in org.apache.phoenix.schema.PDataTypeForArraysTest
Running org.apache.phoenix.schema.RowKeySchemaTest
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.09 sec - in org.apache.phoenix.schema.RowKeySchemaTest
Running org.apache.phoenix.schema.PMetaDataImplTest
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 sec - in org.apache.phoenix.schema.PMetaDataImplTest
Running org.apache.phoenix.hbase.index.covered.TestCoveredColumns
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.001 sec - in org.apache.phoenix.hbase.index.covered.TestCoveredColumns
Running org.apache.phoenix.hbase.index.covered.data.TestIndexMemStore
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.009 sec - in org.apache.phoenix.hbase.index.covered.data.TestIndexMemStore
Running org.apache.phoenix.hbase.index.covered.TestLocalTableState
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.322 sec - in org.apache.phoenix.hbase.index.covered.TestLocalTableState
Running org.apache.phoenix.hbase.index.covered.filter.TestFamilyOnlyFilter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.001 sec - in org.apache.phoenix.hbase.index.covered.filter.TestFamilyOnlyFilter
Running org.apache.phoenix.hbase.index.covered.filter.TestApplyAndFilterDeletesFilter
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 sec - in org.apache.phoenix.hbase.index.covered.filter.TestApplyAndFilterDeletesFilter
Running org.apache.phoenix.hbase.index.covered.filter.TestNewerTimestampFilter
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.001 sec - in org.apache.phoenix.hbase.index.covered.filter.TestNewerTimestampFilter
Running org.apache.phoenix.hbase.index.covered.update.TestIndexUpdateManager
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.223 sec - in org.apache.phoenix.hbase.index.covered.update.TestIndexUpdateManager
Running org.apache.phoenix.hbase.index.covered.example.TestCoveredColumnIndexCodec