Jenkins build is back to normal : Phoenix | 4.0 | Hadoop2 #168

2014-09-25 Thread Apache Jenkins Server
See 



Apache-Phoenix | Master | Hadoop1 | Build Successful

2014-09-25 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master-hadoop1/lastCompletedBuild/testReport/

Changes
[ramkrishna] Phoenix-1264 : Add StatisticsCollector to existing tables on first



Apache-Phoenix | 4.0 | Hadoop1 | Build Successful

2014-09-25 Thread Apache Jenkins Server
4.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastCompletedBuild/testReport/

Changes
[ramkrishna] Phoenix-1264 Add StatisticsCollector to existing tables on first



git commit: Phoenix-1264 Add StatisticsCollector to existing tables on first connection to cluster

2014-09-25 Thread ramkrishna
Repository: phoenix
Updated Branches:
  refs/heads/4.0 db9036e95 -> 4070492a9


Phoenix-1264 Add StatisticsCollector to existing tables on first
connection to cluster


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4070492a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4070492a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4070492a

Branch: refs/heads/4.0
Commit: 4070492a9dbd55ab95fe666b4cd6eebe8fe5aeb9
Parents: db9036e
Author: Ramkrishna 
Authored: Fri Sep 26 11:22:58 2014 +0530
Committer: Ramkrishna 
Committed: Fri Sep 26 11:22:58 2014 +0530

--
 ...efaultParallelIteratorsRegionSplitterIT.java |  15 ++
 .../phoenix/end2end/GuidePostsLifeCycleIT.java  |  22 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |  15 ++
 .../phoenix/end2end/MultiCfQueryExecIT.java |  14 ++
 .../phoenix/end2end/StatsCollectorIT.java   |  44 ++--
 .../coprocessor/BaseScannerRegionObserver.java  |   1 +
 .../UngroupedAggregateRegionObserver.java   |  72 +-
 .../DefaultParallelIteratorRegionSplitter.java  |  30 +--
 .../phoenix/query/ConnectionQueryServices.java  |   3 -
 .../query/ConnectionQueryServicesImpl.java  |  51 +---
 .../query/ConnectionlessQueryServicesImpl.java  |   6 -
 .../query/DelegateConnectionQueryServices.java  |   5 -
 .../apache/phoenix/schema/MetaDataClient.java   |  36 ++-
 .../schema/stat/StatisticsCollector.java| 249 +--
 .../phoenix/schema/stat/StatisticsScanner.java  |   7 +-
 .../phoenix/schema/stat/StatisticsTable.java|  49 ++--
 .../phoenix/schema/stat/StatisticsTracker.java  |  62 -
 .../java/org/apache/phoenix/util/ScanUtil.java  |   4 +
 .../phoenix/query/QueryServicesTestImpl.java|   2 +-
 .../src/main/StatisticsCollect.proto|  20 --
 20 files changed, 272 insertions(+), 435 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4070492a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
index dd1dc8b..a6ec835 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
@@ -27,6 +27,7 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -40,13 +41,18 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Maps;
+
 
 /**
  * Tests for {@link DefaultParallelIteratorRegionSplitter}.
@@ -58,6 +64,14 @@ import org.junit.experimental.categories.Category;
 @Category(ClientManagedTimeTest.class)
 public class DefaultParallelIteratorsRegionSplitterIT extends 
BaseParallelIteratorsRegionSplitterIT {
 
+@BeforeClass
+@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
+public static void doSetup() throws Exception {
+Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
+// Must update config before starting server
+props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, 
Long.toString(20l));
+setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+}
    private static List<KeyRange> getSplits(Connection conn, long ts, final 
Scan scan)
 throws SQLException {
 TableRef tableRef = getTableRef(conn, ts);
@@ -93,6 +107,7 @@ public class DefaultParallelIteratorsRegionSplitterIT 
extends BaseParallelIterat
 Scan scan = new Scan();
 
 // number of regions > target query concurrency
+conn.prepareStatement("SELECT COUNT(*) FROM STABLE").executeQuery();
 scan.setStartRow(K1);
 scan.setStopRow(K12);
 List<KeyRange> keyRanges = getSplits(conn, ts, scan);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4070492a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
-
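
The doSetup() hunk in the diff above is the standard Phoenix integration-test pattern for overriding server-side configuration: the properties must be registered before setUpTestDriver() boots the mini-cluster, because the server only reads them at startup, and a small HISTOGRAM_BYTE_DEPTH_ATTRIB forces guide posts (and therefore stats-driven splits) even on tiny test tables. The real test also tags doSetup() with Phoenix's @Shadower annotation so it replaces the base class's setup. A minimal sketch of the pattern, assuming the Phoenix test harness (BaseClientManagedTimeIT, setUpTestDriver); anything not shown in the diff above is illustrative:

import java.util.Map;

import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;

import com.google.common.collect.Maps;

public class ExampleStatsConfigIT extends BaseClientManagedTimeIT {

    @BeforeClass
    public static void doSetup() throws Exception {
        // Server-side properties are read once at mini-cluster startup, so
        // they must be in place before the test driver is created.
        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
        // A guide post every 20 bytes makes stats collection kick in on the
        // small tables these ITs create, so split points are deterministic.
        props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20L));
        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    }
}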

git commit: Phoenix-1264 : Add StatisticsCollector to existing tables on first connection to cluster

2014-09-25 Thread ramkrishna
Repository: phoenix
Updated Branches:
  refs/heads/master 10efdb1f2 -> 6b0461002


Phoenix-1264 : Add StatisticsCollector to existing tables on first
connection to cluster


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6b046100
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6b046100
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6b046100

Branch: refs/heads/master
Commit: 6b04610022415fcc27ea69fe001cbd464badf355
Parents: 10efdb1
Author: Ramkrishna 
Authored: Fri Sep 26 11:21:40 2014 +0530
Committer: Ramkrishna 
Committed: Fri Sep 26 11:21:40 2014 +0530

--
 ...efaultParallelIteratorsRegionSplitterIT.java |  15 ++
 .../phoenix/end2end/GuidePostsLifeCycleIT.java  |  22 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |  15 ++
 .../phoenix/end2end/MultiCfQueryExecIT.java |  14 ++
 .../phoenix/end2end/StatsCollectorIT.java   |  44 ++--
 .../coprocessor/BaseScannerRegionObserver.java  |   1 +
 .../UngroupedAggregateRegionObserver.java   |  72 +-
 .../DefaultParallelIteratorRegionSplitter.java  |  30 +--
 .../phoenix/query/ConnectionQueryServices.java  |   3 -
 .../query/ConnectionQueryServicesImpl.java  |  51 +---
 .../query/ConnectionlessQueryServicesImpl.java  |   6 -
 .../query/DelegateConnectionQueryServices.java  |   5 -
 .../apache/phoenix/schema/MetaDataClient.java   |  36 ++-
 .../schema/stat/StatisticsCollector.java| 249 +--
 .../phoenix/schema/stat/StatisticsScanner.java  |   7 +-
 .../phoenix/schema/stat/StatisticsTable.java|  49 ++--
 .../phoenix/schema/stat/StatisticsTracker.java  |  62 -
 .../java/org/apache/phoenix/util/ScanUtil.java  |   4 +
 .../phoenix/query/QueryServicesTestImpl.java|   2 +-
 .../src/main/StatisticsCollect.proto|  20 --
 20 files changed, 272 insertions(+), 435 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b046100/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
index dd1dc8b..a6ec835 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
@@ -27,6 +27,7 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -40,13 +41,18 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Maps;
+
 
 /**
  * Tests for {@link DefaultParallelIteratorRegionSplitter}.
@@ -58,6 +64,14 @@ import org.junit.experimental.categories.Category;
 @Category(ClientManagedTimeTest.class)
 public class DefaultParallelIteratorsRegionSplitterIT extends 
BaseParallelIteratorsRegionSplitterIT {
 
+@BeforeClass
+@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
+public static void doSetup() throws Exception {
+Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
+// Must update config before starting server
+props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, 
Long.toString(20l));
+setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+}
    private static List<KeyRange> getSplits(Connection conn, long ts, final 
Scan scan)
 throws SQLException {
 TableRef tableRef = getTableRef(conn, ts);
@@ -93,6 +107,7 @@ public class DefaultParallelIteratorsRegionSplitterIT 
extends BaseParallelIterat
 Scan scan = new Scan();
 
 // number of regions > target query concurrency
+conn.prepareStatement("SELECT COUNT(*) FROM STABLE").executeQuery();
 scan.setStartRow(K1);
 scan.setStopRow(K12);
 List<KeyRange> keyRanges = getSplits(conn, ts, scan);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b046100/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.ja

Build failed in Jenkins: Phoenix | 4.0 | Hadoop2 #167

2014-09-25 Thread Apache Jenkins Server
See 

--
[...truncated 788 lines...]
remote: Counting objects: 5794   
[...repeated "remote: Counting objects" progress lines omitted...]
remote: Counting objects: 9768  

Jenkins build is back to normal : Phoenix | 3.0 | Hadoop1 #227

2014-09-25 Thread Apache Jenkins Server
See 



Jenkins build is back to normal : Phoenix | 4.0 | Hadoop2 #166

2014-09-25 Thread Apache Jenkins Server
See 



git commit: PHOENIX-180 Use stats to guide query parallelization (remove mistakenly checked-in files)

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/3.0 9dd3bc74b -> b4811ad7f


PHOENIX-180 Use stats to guide query parallelization (remove mistakenly 
checked-in files)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b4811ad7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b4811ad7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b4811ad7

Branch: refs/heads/3.0
Commit: b4811ad7f67cfee027692ee27a503898cd75fdcf
Parents: 9dd3bc7
Author: maryannxue 
Authored: Thu Sep 25 14:43:55 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 14:43:55 2014 -0400

--
 .../schema/stat/PTableStatsImpl.java.orig   | 68 --
 .../schema/stat/PTableStatsImpl.java.rej| 74 
 2 files changed, 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b4811ad7/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.orig
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.orig
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.orig
deleted file mode 100644
index a6f6dae..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.orig
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.schema.stat;
-
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.WritableUtils;
-
-import com.google.common.collect.ImmutableMap;
-
-
-/**
- * Implementation for PTableStats.
- */
-public class PTableStatsImpl implements PTableStats {
-
-// The map for guide posts should be immutable. We only take the current 
snapshot from outside
-// method call and store it.
-private Map<String, byte[][]> regionGuidePosts;
-
-public PTableStatsImpl() { }
-
-public PTableStatsImpl(Map<String, byte[][]> stats) {
-regionGuidePosts = ImmutableMap.copyOf(stats);
-}
-
-@Override
-public byte[][] getRegionGuidePosts(HRegionInfo region) {
-return regionGuidePosts.get(region.getRegionNameAsString());
-}
-
-@Override
-public void write(DataOutput output) throws IOException {
-if (regionGuidePosts == null) {
-WritableUtils.writeVInt(output, 0);
-return;
-}
-WritableUtils.writeVInt(output, regionGuidePosts.size());
-for (Entry<String, byte[][]> entry : regionGuidePosts.entrySet()) {
-WritableUtils.writeString(output, entry.getKey());
-byte[][] value = entry.getValue();
-WritableUtils.writeVInt(output, value.length);
-for (int i=0; i

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b4811ad7/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.rej
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.rej
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.rej
deleted file mode 100644
index 2bfc847..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java.rej
+++ /dev/null
@@ -1,74 +0,0 @@
-***
-*** 16,55 
-   * limitations under the License.
-   */
-  package org.apache.phoenix.schema.stat;
-  
-- import java.util.Map;
-- 
-- import org.apache.hadoop.hbase.HRegionInfo;
-- 
-- import com.google.common.collect.ImmutableMap;
-- 
-- 
-- /**
-   * Implementation for PTableStats.
-   */
-  public class PTableStatsImpl implements PTableStats {
-  
-- // The map for guide posts should be immutable. We only take the current 
snapshot from outside
-- // method call and store it.
-- private Map<String, byte[][]> regionGuidePosts;
-  
-- public PTableStatsImpl() { }
-  
-- public PTableS

Build failed in Jenkins: Phoenix | 3.0 | Hadoop1 #226

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[larsh] PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new 
immutable list object.

--
[...truncated 572 lines...]
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
at org.junit.runner.JUnitCore.run(JUnitCore.java:138)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.createRequestAndRun(JUnitCoreWrapper.java:113)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.executeLazy(JUnitCoreWrapper.java:94)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:58)
at 
org.apache.maven.surefire.junitcore.JUnitCoreProvider.invoke(JUnitCoreProvider.java:134)
at 
org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
at 
org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
at 
org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)

org.apache.phoenix.end2end.SkipScanAfterManualSplitIT  Time elapsed: 7,184.882 
sec  <<< FAILURE!
java.lang.AssertionError: null
at org.junit.Assert.fail(Assert.java:86)
at org.junit.Assert.assertTrue(Assert.java:41)
at org.junit.Assert.assertTrue(Assert.java:52)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.dropTables(BaseHBaseManagedTimeIT.java:70)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
at org.junit.runner.JUnitCore.run(JUnitCore.java:138)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.createRequestAndRun(JUnitCoreWrapper.java:113)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.executeLazy(JUnitCoreWrapper.java:94)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:58)
at 
org.apache.maven.surefire.junitcore.JUnitCoreProvider.invoke(JUnitCoreProvider.java:134)
at 
org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
at 
org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
at 
org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


Results :

Failed tests: 
  SkipScanAfterManualSplitIT>BaseHBaseManagedTimeIT.dropTables:70 null

Tests in error: 
  
SkipScanAfterManualSplitIT>BaseHBaseManagedTimeIT.cleanUpAfterTest:56->BaseTest.deletePriorTables:683->BaseTest.deletePriorTables:693->BaseTest.deletePriorTables:727
 ? PhoenixIO

Tests run: 409, Failures: 1, Errors: 1, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test (ClientManagedTimeTests) 
@ phoenix-core ---
[INFO] Failsafe report directory: 

[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, 
useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, 
threadCountMethods=0, parallelOptimized=true

-

[1/2] PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/3.0 2d250fbff -> 9dd3bc74b


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9dd3bc74/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 19a2789..66ad235 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -19,8 +19,9 @@ package org.apache.phoenix.execute;
 
 import java.sql.ParameterMetaData;
 import java.sql.SQLException;
-import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ExplainPlan;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
@@ -41,12 +43,14 @@ import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.WhereCompiler;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.InListExpression;
 import org.apache.phoenix.expression.LiteralExpression;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.job.JobManager.JobCallable;
 import org.apache.phoenix.join.HashCacheClient;
@@ -61,9 +65,11 @@ import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PArrayDataType;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 
@@ -75,29 +81,42 @@ public class HashJoinPlan implements QueryPlan {
 private final FilterableStatement statement;
 private final BasicQueryPlan plan;
 private final HashJoinInfo joinInfo;
-private final List<Expression>[] hashExpressions;
-private final Expression[] keyRangeLhsExpressions;
-private final Expression[] keyRangeRhsExpressions;
-private final QueryPlan[] hashPlans;
-private final TupleProjector[] clientProjectors;
-private final boolean[] hasFilters;
+private final SubPlan[] subPlans;
+private final boolean recompileWhereClause;
 private final boolean forceHashJoinRangeScan;
 private final boolean forceHashJoinSkipScan;
-
-public HashJoinPlan(FilterableStatement statement, 
-BasicQueryPlan plan, HashJoinInfo joinInfo,
-List<Expression>[] hashExpressions, Expression[] 
keyRangeLhsExpressions,
-Expression[] keyRangeRhsExpressions, QueryPlan[] hashPlans, 
-TupleProjector[] clientProjectors, boolean[] hasFilters) {
+private List<SQLCloseable> dependencies;
+private HashCacheClient hashClient;
+private int maxServerCacheTimeToLive;
+private AtomicLong firstJobEndTime;
+private List<Expression> keyRangeExpressions;
+
+public static HashJoinPlan create(FilterableStatement statement, 
+QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans) {
+if (plan instanceof BasicQueryPlan)
+return new HashJoinPlan(statement, (BasicQueryPlan) plan, 
joinInfo, subPlans, joinInfo == null);
+
+assert (plan instanceof HashJoinPlan);
+HashJoinPlan hashJoinPlan = (HashJoinPlan) plan;
+assert hashJoinPlan.joinInfo == null;
+SubPlan[] mergedSubPlans = new SubPlan[hashJoinPlan.subPlans.length + 
subPlans.length];
+int i = 0;
+for (SubPlan subPlan : hashJoinPlan.subPlans) {
+mergedSubPlans[i++] = subPlan;
+}
+for (SubPlan subPlan : subPlans) {
+mergedSubPlans[i++] = subPlan;
+}
+return new HashJoinPlan(statement, hashJoinPlan.pl

[2/2] git commit: PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
PHOENIX-1168 Support non-correlated sub-queries in where clause having a 
comparison operator with no modifier or a comparison operator modified by ANY, 
SOME or ALL
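
In practice this commit lets non-correlated subqueries appear directly in the WHERE clause. A rough JDBC sketch of the two query shapes the summary names; the connection URL, table, and column names are illustrative, not from the commit:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SubqueryExample {
    public static void main(String[] args) throws Exception {
        // The Phoenix thick driver connects through ZooKeeper.
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement();
        // Non-correlated subquery compared with no modifier.
        ResultSet rs = stmt.executeQuery(
            "SELECT name FROM item WHERE price = (SELECT MAX(price) FROM item)");
        // Non-correlated subquery modified by ALL (ANY and SOME work likewise).
        // Per the HashJoinIT plan strings in the diff below, such queries now
        // compile into PARALLEL EQUI/SEMI/ANTI-JOIN hash-join plans.
        rs = stmt.executeQuery(
            "SELECT name FROM item WHERE price > ALL (SELECT price FROM clearance)");
        while (rs.next()) {
            System.out.println(rs.getString(1));
        }
        conn.close();
    }
}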


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9dd3bc74
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9dd3bc74
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9dd3bc74

Branch: refs/heads/3.0
Commit: 9dd3bc74b6bc836847e90df0c0e0ea7cec93986a
Parents: 2d250fb
Author: maryannxue 
Authored: Thu Sep 25 14:37:55 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 14:37:55 2014 -0400

--
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 264 +++
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |   4 +-
 .../phoenix/compile/ExpressionCompiler.java |  37 +++
 .../apache/phoenix/compile/JoinCompiler.java|   7 +
 .../apache/phoenix/compile/QueryCompiler.java   |  57 ++--
 .../phoenix/compile/StatementContext.java   |  18 ++
 .../phoenix/compile/StatementNormalizer.java|  17 +-
 .../apache/phoenix/compile/WhereCompiler.java   |  45 ++-
 .../phoenix/exception/SQLExceptionCode.java |   1 +
 .../apache/phoenix/execute/BasicQueryPlan.java  |   2 +-
 .../apache/phoenix/execute/HashJoinPlan.java| 323 +--
 .../expression/ComparisonExpression.java|   3 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  |  18 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |  36 ++-
 .../apache/phoenix/parse/ParseNodeVisitor.java  |   6 +-
 .../phoenix/parse/SelectStatementRewriter.java  |  13 +
 .../StatelessTraverseAllParseNodeVisitor.java   |   5 +
 .../apache/phoenix/parse/SubqueryParseNode.java |   8 +-
 .../parse/TraverseAllParseNodeVisitor.java  |  10 +
 .../parse/TraverseNoParseNodeVisitor.java   |  15 +
 .../phoenix/compile/JoinQueryCompilerTest.java  |   4 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  22 +-
 22 files changed, 711 insertions(+), 204 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9dd3bc74/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index c2c7298..5243a2e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -118,7 +118,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME,
 /* 
@@ -131,7 +131,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
 "SERVER FILTER BY FIRST KEY ONLY",
@@ -146,7 +146,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, 
I.item_id]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /* 
@@ -159,7 +159,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /*
@@ -173,7 +173,7 @@ public class HashJoinIT extends BaseHBa

Jenkins build is back to normal : Phoenix | Master | Hadoop1 #385

2014-09-25 Thread Apache Jenkins Server
See 



Jenkins build is back to normal : Phoenix | 4.0 | Hadoop1 #335

2014-09-25 Thread Apache Jenkins Server
See 



git commit: PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL(fix regression)

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/4.0 81477ee89 -> db9036e95


PHOENIX-1168 Support non-correlated sub-queries in where clause having a 
comparison operator with no modifier or a comparison operator modified by ANY, 
SOME or ALL(fix regression)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/db9036e9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/db9036e9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/db9036e9

Branch: refs/heads/4.0
Commit: db9036e95bec5847155628819d187d15824ec351
Parents: 81477ee
Author: maryannxue 
Authored: Thu Sep 25 13:58:48 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 13:58:48 2014 -0400

--
 .../java/org/apache/phoenix/compile/StatementNormalizer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/db9036e9/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
index 88aa81c..698756b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.ArrayElemRefNode;
 import org.apache.phoenix.parse.BetweenParseNode;
 import org.apache.phoenix.parse.BindTableNode;
 import org.apache.phoenix.parse.ColumnParseNode;
@@ -144,7 +145,8 @@ public class StatementNormalizer extends ParseNodeRewriter {
 
 @Override
 public ParseNode visitLeave(ComparisonParseNode node, List<ParseNode> 
nodes) throws SQLException {
- if (nodes.get(0).isStateless() && !nodes.get(1).isStateless()) {
+ if (nodes.get(0).isStateless() && !nodes.get(1).isStateless()
+ && !(nodes.get(1) instanceof ArrayElemRefNode)) {
  List<ParseNode> normNodes = Lists.newArrayListWithExpectedSize(2);
  normNodes.add(nodes.get(1));
  normNodes.add(nodes.get(0));
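
The guard added above stops StatementNormalizer from flipping a comparison whose right-hand operand is an ArrayElemRefNode. The normalizer ordinarily moves a stateless (literal) operand to the right-hand side, but the ANY/SOME/ALL rewrite from PHOENIX-1168 plants an internally generated array element reference on the right of exactly such comparisons, and swapping it presumably caused the ArrayIT ANY/ALL failures reported in the build mails elsewhere in this digest. A sketch of the affected query shape; the connection URL, table, and column are illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class ArrayAnyExample {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        // The literal 1 is stateless, so before this fix the normalizer would
        // swap the operands; the ANY rewrite puts an array element reference
        // on the right, and that side must stay put for the plan to compile.
        PreparedStatement stmt = conn.prepareStatement(
            "SELECT id FROM t WHERE 1 = ANY(int_arr)");
        stmt.executeQuery();
        conn.close();
    }
}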



git commit: PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL(fix regression)

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/master a0694b77c -> 10efdb1f2


PHOENIX-1168 Support non-correlated sub-queries in where clause having a 
comparison operator with no modifier or a comparison operator modified by ANY, 
SOME or ALL(fix regression)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/10efdb1f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/10efdb1f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/10efdb1f

Branch: refs/heads/master
Commit: 10efdb1f274d344536baef56101ab903c10de12b
Parents: a0694b7
Author: maryannxue 
Authored: Thu Sep 25 13:52:31 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 13:52:31 2014 -0400

--
 .../java/org/apache/phoenix/compile/StatementNormalizer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/10efdb1f/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
index 88aa81c..698756b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.phoenix.parse.AliasedNode;
+import org.apache.phoenix.parse.ArrayElemRefNode;
 import org.apache.phoenix.parse.BetweenParseNode;
 import org.apache.phoenix.parse.BindTableNode;
 import org.apache.phoenix.parse.ColumnParseNode;
@@ -144,7 +145,8 @@ public class StatementNormalizer extends ParseNodeRewriter {
 
 @Override
 public ParseNode visitLeave(ComparisonParseNode node, List<ParseNode> 
nodes) throws SQLException {
- if (nodes.get(0).isStateless() && !nodes.get(1).isStateless()) {
+ if (nodes.get(0).isStateless() && !nodes.get(1).isStateless()
+ && !(nodes.get(1) instanceof ArrayElemRefNode)) {
  List<ParseNode> normNodes = Lists.newArrayListWithExpectedSize(2);
  normNodes.add(nodes.get(1));
  normNodes.add(nodes.get(0));



Build failed in Jenkins: Phoenix | 4.0 | Hadoop2 #165

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[larsh] PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new 
immutable list object.

[maryannxue] PHOENIX-1168 Support non-correlated sub-queries in where clause 
having a comparison operator with no modifier or a comparison operator modified 
by ANY, SOME or ALL

--
[...truncated 1431 lines...]
at 
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
at 
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
at 
org.apache.hadoop.hbase.client.ClientScanner.next(ClientScanner.java:354)
at 
org.apache.phoenix.iterate.ScanningResultIterator.next(ScanningResultIterator.java:47)
at 
org.apache.phoenix.iterate.TableResultIterator.next(TableResultIterator.java:76)
at 
org.apache.phoenix.iterate.ChunkedResultIterator$SingleChunkResultIterator.next(ChunkedResultIterator.java:146)
at 
org.apache.phoenix.iterate.SpoolingResultIterator.<init>(SpoolingResultIterator.java:110)
at 
org.apache.phoenix.iterate.SpoolingResultIterator.<init>(SpoolingResultIterator.java:74)
at 
org.apache.phoenix.iterate.SpoolingResultIterator$SpoolingResultIteratorFactory.newIterator(SpoolingResultIterator.java:68)
at 
org.apache.phoenix.iterate.ChunkedResultIterator.<init>(ChunkedResultIterator.java:89)
at 
org.apache.phoenix.iterate.ChunkedResultIterator$ChunkedResultIteratorFactory.newIterator(ChunkedResultIterator.java:69)
at 
org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:366)
at 
org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)

Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.508 sec - in 
org.apache.phoenix.end2end.ExtendedQueryExecIT
Running org.apache.phoenix.end2end.DynamicUpsertIT
Running org.apache.phoenix.end2end.NativeHBaseTypesIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.005 sec - in 
org.apache.phoenix.end2end.DynamicUpsertIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.309 sec - in 
org.apache.phoenix.end2end.NativeHBaseTypesIT
Running org.apache.phoenix.end2end.DynamicColumnIT
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.859 sec - in 
org.apache.phoenix.end2end.DynamicColumnIT
Tests run: 49, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 43.045 sec - 
in org.apache.phoenix.end2end.CastAndCoerceIT
Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 143.996 sec - 
in org.apache.phoenix.end2end.QueryIT
Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 142.714 sec - 
in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT

Results :

Tests in error: 
  ArrayIT.testArraySelectWithANYUsingVarLengthArray:615 ? PhoenixIO 
org.apache.p...
  ArrayIT.testArraySelectWithALL:531 ? PhoenixIO 
org.apache.phoenix.exception.Ph...
  ArrayIT.testArraySelectWithANY:502 ? PhoenixIO 
org.apache.phoenix.exception.Ph...
  ArrayIT.testArraySelectWithALLCombinedWithOR:584 ? PhoenixIO 
org.apache.phoeni...
  ArrayIT.testArraySelectWithANYCombinedWithOR:555 ? PhoenixIO 
org.apache.phoeni...

Tests run: 1270, Failures: 0, Errors: 5, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) 
@ phoenix-core ---
[INFO] Failsafe report directory: 

[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, 
useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, 
threadCountMethods=0, parallelOptimized=true

---
 T E S T S
---

---
 T E S T S
---
Running org.apache.phoenix.trace.PhoenixTraceReaderIT
Running org.apache.phoenix.end2end.index.LocalIndexIT
Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Running org.apache.phoenix.end2end.index.DropViewIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.463 sec - in 
org.apache.phoenix.trace.PhoenixTraceReaderIT
Running org.apache.phoenix.end2end.index.SaltedIndexIT
Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.447 sec - in 
org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Running org.apache.phoenix.end2end.index.

Build failed in Jenkins: Phoenix | 4.0 | Hadoop1 #334

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[maryannxue] PHOENIX-1168 Support non-correlated sub-queries in where clause 
having a comparison operator with no modifier or a comparison operator modified 
by ANY, SOME or ALL

--
[...truncated 1413 lines...]
Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.731 sec - in 
org.apache.phoenix.end2end.PercentileIT
Running org.apache.phoenix.end2end.StatsManagerIT
Tests run: 49, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 42.919 sec - 
in org.apache.phoenix.end2end.CastAndCoerceIT
Running org.apache.phoenix.end2end.CaseStatementIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.066 sec - in 
org.apache.phoenix.end2end.ColumnProjectionOptimizationIT
Running org.apache.phoenix.end2end.ToCharFunctionIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.864 sec - in 
org.apache.phoenix.end2end.ToCharFunctionIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.055 sec - in 
org.apache.phoenix.end2end.DistinctCountIT
Running org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.202 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.142 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableIT
Running org.apache.phoenix.end2end.StddevIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.39 sec - in 
org.apache.phoenix.end2end.StatsManagerIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.47 sec - in 
org.apache.phoenix.end2end.StddevIT
Running org.apache.phoenix.end2end.ProductMetricsIT
Running org.apache.phoenix.end2end.NotQueryIT
Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 146.734 sec - 
in org.apache.phoenix.end2end.QueryIT
Running org.apache.phoenix.end2end.ReadIsolationLevelIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.637 sec - in 
org.apache.phoenix.end2end.ReadIsolationLevelIT
Running org.apache.phoenix.end2end.CreateTableIT
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.503 sec - 
in org.apache.phoenix.end2end.UpsertSelectIT
Running org.apache.phoenix.end2end.CompareDecimalToLongIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.689 sec - in 
org.apache.phoenix.end2end.CompareDecimalToLongIT
Running org.apache.phoenix.end2end.OrderByIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.8 sec - in 
org.apache.phoenix.end2end.OrderByIT
Tests run: 61, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.697 sec - 
in org.apache.phoenix.end2end.ProductMetricsIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.587 sec - 
in org.apache.phoenix.end2end.CreateTableIT
Tests run: 63, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 44.849 sec - 
in org.apache.phoenix.end2end.CaseStatementIT
Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 53.589 sec - 
in org.apache.phoenix.end2end.NotQueryIT

Results :

Tests in error: 
  ArrayIT.testArraySelectWithANYUsingVarLengthArray:615 ? PhoenixIO 
org.apache.p...
  ArrayIT.testArraySelectWithALL:531 ? PhoenixIO 
org.apache.phoenix.exception.Ph...
  ArrayIT.testArraySelectWithANY:502 ? PhoenixIO 
org.apache.phoenix.exception.Ph...
  ArrayIT.testArraySelectWithALLCombinedWithOR:584 ? PhoenixIO 
org.apache.phoeni...
  ArrayIT.testArraySelectWithANYCombinedWithOR:555 ? PhoenixIO 
org.apache.phoeni...

Tests run: 1270, Failures: 0, Errors: 5, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) 
@ phoenix-core ---
[INFO] Failsafe report directory: 

[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, 
useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, 
threadCountMethods=0, parallelOptimized=true

---
 T E S T S
---

---
 T E S T S
---
Running org.apache.phoenix.end2end.UpsertSelectAutoCommitIT
Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT
Running org.apache.phoenix.end2end.BinaryRowKeyIT
Running org.apache.phoenix.trace.PhoenixTraceReaderIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.511 sec - in 
org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Running org.apache.phoenix.end2end.DynamicFamilyIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.188 sec - in 
org.apache.phoenix.

Build failed in Jenkins: Phoenix | Master | Hadoop1 #384

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[maryannxue] PHOENIX-1168 Support non-correlated sub-queries in where clause 
having a comparison operator with no modifier or a comparison operator modified 
by ANY, SOME or ALL

--
[...truncated 1593 lines...]
at 
org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:101)
at 
org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:57)
at 
org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
at 
org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: 
org.apache.hadoop.hbase.DoNotRetryIOException: 
_LOCAL_IDX_T,,1411663117475.0da184881508acdc006744b4be1e7573.: Requested memory 
of 21196 bytes could not be allocated from remaining memory of 21196 bytes from 
global pool of 4 bytes after waiting for 0ms.
at 
org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
at 
org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
at 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
at 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
at 
org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
at 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
at 
org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested 
memory of 21196 bytes could not be allocated from remaining memory of 21196 
bytes from global pool of 4 bytes after waiting for 0ms.
at 
org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:81)
at 
org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
at 
org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
at 
org.apache.phoenix.cache.aggcache.SpillableGroupByCache.<init>(SpillableGroupByCache.java:150)
at 
org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver$GroupByCacheFactory.newCache(GroupedAggregateRegionObserver.java:362)
at 
org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.scanUnordered(GroupedAggregateRegionObserver.java:397)
at 
org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.doPostScannerOpen(GroupedAggregateRegionObserver.java:160)
at 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
... 8 more

at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at 
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
at 
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
at 
org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
at 
org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:95)
at 
org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:285)
at 
org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:316)
at 
org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
at 
org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
at 
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
at 
org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
at 
org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
at 
org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
at 
org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:182)
at 

[2/2] git commit: PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
PHOENIX-1168 Support non-correlated sub-queries in where clause having a 
comparison operator with no modifier or a comparison operator modified by ANY, 
SOME or ALL


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81477ee8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81477ee8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81477ee8

Branch: refs/heads/4.0
Commit: 81477ee8961ea6b99d981159f06ef644fd2f7dc0
Parents: 349d04f
Author: maryannxue 
Authored: Thu Sep 25 12:38:00 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 12:38:00 2014 -0400

--
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 322 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |   4 +-
 .../phoenix/compile/ExpressionCompiler.java |  37 ++
 .../apache/phoenix/compile/JoinCompiler.java|   7 +
 .../apache/phoenix/compile/QueryCompiler.java   |  57 ++--
 .../phoenix/compile/StatementContext.java   |  15 +
 .../phoenix/compile/StatementNormalizer.java|  13 +
 .../apache/phoenix/compile/WhereCompiler.java   |  46 ++-
 .../phoenix/exception/SQLExceptionCode.java |   1 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |   2 +-
 .../apache/phoenix/execute/HashJoinPlan.java| 334 +--
 .../expression/ComparisonExpression.java|   3 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  |  18 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |  36 +-
 .../apache/phoenix/parse/ParseNodeVisitor.java  |   7 +-
 .../phoenix/parse/SelectStatementRewriter.java  |  13 +
 .../StatelessTraverseAllParseNodeVisitor.java   |   5 +
 .../apache/phoenix/parse/SubqueryParseNode.java |   8 +-
 .../parse/TraverseAllParseNodeVisitor.java  |  10 +
 .../parse/TraverseNoParseNodeVisitor.java   |  15 +
 .../phoenix/compile/JoinQueryCompilerTest.java  |   4 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  22 +-
 22 files changed, 738 insertions(+), 241 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81477ee8/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index dcd96d3..ceba009 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -119,7 +119,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME,
 /* 
@@ -132,7 +132,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
 "SERVER FILTER BY FIRST KEY ONLY",
@@ -147,7 +147,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, 
I.item_id]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /* 
@@ -160,7 +160,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /*
@@ -174,7 +174,7 @@ public class HashJoinIT extends BaseHBas

[1/2] PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/4.0 349d04fda -> 81477ee89


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81477ee8/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index a74e0f1..d4c119b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -150,7 +150,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
 return iterator(Collections.emptyList());
 }
 
-public final ResultIterator iterator(final List dependencies) throws SQLException {
+public final ResultIterator iterator(final List dependencies) throws SQLException {
 if (context.getScanRanges() == ScanRanges.NOTHING) {
 return ResultIterator.EMPTY_ITERATOR;
 }
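
The hunk above carries the resource-lifetime idea behind the change: sub-plans hand the iterator a list of dependencies (in the hash-join case, server-side caches) that must stay open until the consumer finishes iterating. A minimal sketch of that pattern under stated assumptions: it uses plain java.io.Closeable instead of Phoenix's SQLCloseable, and DependentIterator is an illustrative name, not Phoenix's ResultIterator.

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Iterator;
    import java.util.List;

    // Sketch only: an iterator that owns dependent resources and releases
    // them when closed, mirroring the dependencies argument threaded through
    // iterator(...) in the hunk above.
    class DependentIterator<T> implements Iterator<T>, Closeable {
        private final Iterator<T> delegate;
        private final List<? extends Closeable> dependencies;

        DependentIterator(Iterator<T> delegate, List<? extends Closeable> dependencies) {
            this.delegate = delegate;
            this.dependencies = dependencies;
        }

        @Override public boolean hasNext() { return delegate.hasNext(); }

        @Override public T next() { return delegate.next(); }

        @Override
        public void close() throws IOException {
            // Free dependencies only after iteration is done; closing them
            // earlier would invalidate data the scan may still be reading.
            for (Closeable c : dependencies) {
                c.close();
            }
        }
    }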

http://git-wip-us.apache.org/repos/asf/phoenix/blob/81477ee8/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 6154c88..0d09f79 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.execute;
 
-import static java.util.Collections.emptyMap;
 import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
 
 import java.sql.ParameterMetaData;
 import java.sql.SQLException;
-import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ExplainPlan;
 import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
@@ -45,12 +46,14 @@ import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.WhereCompiler;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.InListExpression;
 import org.apache.phoenix.expression.LiteralExpression;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.job.JobManager.JobCallable;
@@ -66,9 +69,11 @@ import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PArrayDataType;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 
@@ -80,29 +85,43 @@ public class HashJoinPlan implements QueryPlan {
 private final FilterableStatement statement;
 private final BaseQueryPlan plan;
 private final HashJoinInfo joinInfo;
-private final List[] hashExpressions;
-private final Expression[] keyRangeLhsExpressions;
-private final Expression[] keyRangeRhsExpressions;
-private final QueryPlan[] hashPlans;
-private final TupleProjector[] clientProjectors;
-private final boolean[] hasFilters;
+private final SubPlan[] subPlans;
+private final boolean recompileWhereClause;
 private final boolean forceHashJoinRangeScan;
 private final boolean forceHashJoinSkipScan;
-
-public HashJoinPlan(FilterableStatement statement, 
-BaseQueryPlan plan, HashJoinInfo joinInfo,
-List[] hashExpressions, Expression[] keyRangeLhsExpressions,
-Expression[] keyRangeRhsExpressions, QueryPlan[] 
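
The constructor and field changes above show the heart of the refactor: six parallel arrays (hash expressions, key-range expressions, plans, projectors, filter flags), all indexed in lock-step, collapse into a single SubPlan[] where each sub-plan owns its own state. A hypothetical sketch of that shape follows; the SubPlan interface and driver here are illustrative, not Phoenix's actual API.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Each sub-plan carries everything it needs, so the driver iterates one
    // array instead of keeping several arrays aligned by index.
    interface SubPlan extends Callable<String> {
    }

    public class SubPlanDriver {
        public static void main(String[] args) throws Exception {
            SubPlan[] subPlans = {
                () -> "hash table 0 built",
                () -> "hash table 1 built",
            };
            ExecutorService pool = Executors.newFixedThreadPool(subPlans.length);
            try {
                List<Future<String>> results = new ArrayList<>();
                for (SubPlan p : subPlans) {
                    results.add(pool.submit(p)); // run sub-plans in parallel
                }
                for (Future<String> f : results) {
                    System.out.println(f.get()); // wait for every sub-plan
                }
            } finally {
                pool.shutdown();
            }
        }
    }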

[2/2] git commit: PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
PHOENIX-1168 Support non-correlated sub-queries in where clause having a 
comparison operator with no modifier or a comparison operator modified by ANY, 
SOME or ALL


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a0694b77
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a0694b77
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a0694b77

Branch: refs/heads/master
Commit: a0694b77c1e16c90af96d3092b1f941052493244
Parents: ea5a797
Author: maryannxue 
Authored: Thu Sep 25 12:30:03 2014 -0400
Committer: maryannxue 
Committed: Thu Sep 25 12:30:03 2014 -0400

--
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 322 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |   4 +-
 .../phoenix/compile/ExpressionCompiler.java |  37 ++
 .../apache/phoenix/compile/JoinCompiler.java|   7 +
 .../apache/phoenix/compile/QueryCompiler.java   |  57 ++--
 .../phoenix/compile/StatementContext.java   |  15 +
 .../phoenix/compile/StatementNormalizer.java|  13 +
 .../apache/phoenix/compile/WhereCompiler.java   |  46 ++-
 .../phoenix/exception/SQLExceptionCode.java |   1 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |   2 +-
 .../apache/phoenix/execute/HashJoinPlan.java| 334 +--
 .../expression/ComparisonExpression.java|   3 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  |  18 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |  36 +-
 .../apache/phoenix/parse/ParseNodeVisitor.java  |   7 +-
 .../phoenix/parse/SelectStatementRewriter.java  |  13 +
 .../StatelessTraverseAllParseNodeVisitor.java   |   5 +
 .../apache/phoenix/parse/SubqueryParseNode.java |   8 +-
 .../parse/TraverseAllParseNodeVisitor.java  |  10 +
 .../parse/TraverseNoParseNodeVisitor.java   |  15 +
 .../phoenix/compile/JoinQueryCompilerTest.java  |   4 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  22 +-
 22 files changed, 738 insertions(+), 241 deletions(-)
--
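
As a usage illustration of the feature described above, such sub-queries run through the ordinary Phoenix JDBC driver. The sketch below is illustrative only: the table names (item, discounted_item) and the connection URL are placeholders, and it assumes a running Phoenix/HBase cluster.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class NonCorrelatedSubqueryExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Unmodified comparison: the non-correlated sub-query is
                // evaluated once and its single value compared per row.
                try (ResultSet rs = stmt.executeQuery(
                        "SELECT item_id, name FROM item " +
                        "WHERE price > (SELECT MAX(price) FROM discounted_item)")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1) + "\t" + rs.getString(2));
                    }
                }
                // ANY/SOME/ALL quantify the comparison over the sub-query's
                // whole result set; judging by the diff, these compile down
                // to hash-join sub-plans.
                try (ResultSet rs = stmt.executeQuery(
                        "SELECT item_id FROM item " +
                        "WHERE price >= ALL (SELECT price FROM discounted_item)")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1));
                    }
                }
            }
        }
    }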


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a0694b77/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index dcd96d3..ceba009 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -119,7 +119,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME,
 /* 
@@ -132,7 +132,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
 "SERVER FILTER BY FIRST KEY ONLY",
@@ -147,7 +147,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[I.item_id]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, 
I.item_id]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /* 
@@ -160,7 +160,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 "SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT SORTED BY [I.NAME]\n" +
-"PARALLEL EQUI-JOIN 1 HASH TABLES:\n" +
+"PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n" +
 "BUILD HASH TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + 
JOIN_ORDER_TABLE_DISPLAY_NAME,
 /*
@@ -174,7 +174,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {

[1/2] PHOENIX-1168 Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL

2014-09-25 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/master ea5a797eb -> a0694b77c


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a0694b77/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index a74e0f1..d4c119b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -150,7 +150,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
 return iterator(Collections.emptyList());
 }
 
-public final ResultIterator iterator(final List dependencies) throws SQLException {
+public final ResultIterator iterator(final List dependencies) throws SQLException {
 if (context.getScanRanges() == ScanRanges.NOTHING) {
 return ResultIterator.EMPTY_ITERATOR;
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a0694b77/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 6154c88..0d09f79 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -17,14 +17,14 @@
  */
 package org.apache.phoenix.execute;
 
-import static java.util.Collections.emptyMap;
 import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
 
 import java.sql.ParameterMetaData;
 import java.sql.SQLException;
-import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ExplainPlan;
 import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
@@ -45,12 +46,14 @@ import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.WhereCompiler;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.InListExpression;
 import org.apache.phoenix.expression.LiteralExpression;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.job.JobManager.JobCallable;
@@ -66,9 +69,11 @@ import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PArrayDataType;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 
@@ -80,29 +85,43 @@ public class HashJoinPlan implements QueryPlan {
 private final FilterableStatement statement;
 private final BaseQueryPlan plan;
 private final HashJoinInfo joinInfo;
-private final List[] hashExpressions;
-private final Expression[] keyRangeLhsExpressions;
-private final Expression[] keyRangeRhsExpressions;
-private final QueryPlan[] hashPlans;
-private final TupleProjector[] clientProjectors;
-private final boolean[] hasFilters;
+private final SubPlan[] subPlans;
+private final boolean recompileWhereClause;
 private final boolean forceHashJoinRangeScan;
 private final boolean forceHashJoinSkipScan;
-
-public HashJoinPlan(FilterableStatement statement, 
-BaseQueryPlan plan, HashJoinInfo joinInfo,
-List[] hashExpressions, Expression[] keyRangeLhsExpressions,
-Expression[] keyRangeRhsExpressions, QueryPlan

Apache-Phoenix | 4.0 | Hadoop1 | Build Successful

2014-09-25 Thread Apache Jenkins Server
4.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastCompletedBuild/testReport/

Changes
[larsh] PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list object.



Build failed in Jenkins: Phoenix | Master | Hadoop1 #383

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[larsh] PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new 
immutable list object.

--
[...truncated 653 lines...]
at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:101)
at org.apache.phoenix.iterate.TableResultIterator.&lt;init&gt;(TableResultIterator.java:57)
at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: _LOCAL_IDX_T,e\x00\x00\x00\x00\x00\x00\x00\x00\x00,1411660419823.d785014a3005655ccdd68d85075dd73e.: Requested memory of 21196 bytes could not be allocated from remaining memory of 21196 bytes from global pool of 4 bytes after waiting for 0ms.
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 21196 bytes could not be allocated from remaining memory of 21196 bytes from global pool of 4 bytes after waiting for 0ms.
at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:81)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
at org.apache.phoenix.cache.aggcache.SpillableGroupByCache.&lt;init&gt;(SpillableGroupByCache.java:150)
at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver$GroupByCacheFactory.newCache(GroupedAggregateRegionObserver.java:362)
at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.scanUnordered(GroupedAggregateRegionObserver.java:397)
at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.doPostScannerOpen(GroupedAggregateRegionObserver.java:160)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
... 8 more

at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:95)
at org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:285)
at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:316)
at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
at org.apache.hadoop.hbase.client.ClientScanner.&lt;init&gt;(ClientScanner.java:182)
at org.apache.hadoop.hbase.client.ClientScanner.&lt;init&gt;(C
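
The repeated failure above is Phoenix's server-side memory accounting at work: GlobalMemoryManager tracks a fixed global pool (here just 4 bytes, which makes any real allocation fail fast) and throws InsufficientMemoryException when a request cannot be satisfied in time. A toy sketch of that bounded-pool semantics follows; it is illustrative only, not Phoenix's GlobalMemoryManager.

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of a fixed-capacity byte budget: reserve or fail, never grow.
    class BoundedMemoryPool {
        private final long capacityBytes;
        private final AtomicLong usedBytes = new AtomicLong();

        BoundedMemoryPool(long capacityBytes) {
            this.capacityBytes = capacityBytes;
        }

        void allocate(long nBytes) {
            while (true) {
                long used = usedBytes.get();
                long remaining = capacityBytes - used;
                if (nBytes > remaining) {
                    throw new IllegalStateException("Requested memory of " + nBytes
                        + " bytes could not be allocated from remaining memory of "
                        + remaining + " bytes from global pool of " + capacityBytes
                        + " bytes");
                }
                if (usedBytes.compareAndSet(used, used + nBytes)) {
                    return; // reservation succeeded
                }
            }
        }

        void free(long nBytes) {
            usedBytes.addAndGet(-nBytes);
        }
    }

    class Demo {
        public static void main(String[] args) {
            BoundedMemoryPool pool = new BoundedMemoryPool(4); // as in this build
            pool.allocate(4);     // fits exactly
            pool.free(4);
            pool.allocate(21196); // throws, like the scans above
        }
    }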

git commit: PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list object.

2014-09-25 Thread larsh
Repository: phoenix
Updated Branches:
  refs/heads/3.0 87fe59e5b -> 2d250fbff


PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list 
object.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d250fbf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d250fbf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d250fbf

Branch: refs/heads/3.0
Commit: 2d250fbff324baad75af7bd1fe1879e602fede1d
Parents: 87fe59e
Author: Lars Hofhansl 
Authored: Thu Sep 25 08:50:22 2014 -0700
Committer: Lars Hofhansl 
Committed: Thu Sep 25 08:50:22 2014 -0700

--
 .../java/org/apache/phoenix/coprocessor/ScanRegionObserver.java | 5 -
 .../org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java | 5 ++---
 2 files changed, 6 insertions(+), 4 deletions(-)
--
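
The rationale behind this small diff: setKeyValues runs once per scanned row, so the unconditional ImmutableList.copyOf was pure overhead; the fix documents a no-mutation contract on the setter and pays for the copy only at the one call site that keeps mutating its list. A minimal sketch of the pattern, with illustrative names and a type parameter standing in for HBase's KeyValue (Guava on the classpath, as in the diff below):

    import java.util.List;

    import com.google.common.collect.ImmutableList;

    class TupleSketch<KV> {
        private List<KV> values;

        /** Caller must not modify the list that is passed here. */
        void setKeyValues(List<KV> values) {
            this.values = values; // hot path: no defensive copy per row
        }

        KV getValue(int index) {
            return values.get(index);
        }
    }

    class MutatingCaller {
        static <KV> TupleSketch<KV> wrap(List<KV> result) {
            TupleSketch<KV> tuple = new TupleSketch<>();
            // result keeps being modified after this point, so the copy is
            // made here, at the single site that actually needs it.
            tuple.setKeyValues(ImmutableList.copyOf(result));
            return tuple;
        }
    }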


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d250fbf/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index b5682af..4389426 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -25,7 +25,9 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -375,7 +377,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
 
 private void replaceArrayIndexElement(final Set arrayKVRefs,
 final Expression[] arrayFuncRefs, List result) {
-MultiKeyValueTuple tuple = new MultiKeyValueTuple(result);
+// make a copy of the results array here, as we're modifying it below
+MultiKeyValueTuple tuple = new MultiKeyValueTuple(ImmutableList.copyOf(result));
 // The size of both the arrays would be same?
 // Using KeyValueSchema to set and retrieve the value
 // collect the first kv to get the row

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d250fbf/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
index 7380d1d..dd35aef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.util.KeyValueUtil;
 
-import com.google.common.collect.ImmutableList;
-
 
 public class MultiKeyValueTuple extends BaseTuple {
 private List values;
@@ -37,8 +35,9 @@ public class MultiKeyValueTuple extends BaseTuple {
 public MultiKeyValueTuple() {
 }
 
+/** Caller must not modify the list that is passed here */
 public void setKeyValues(List values) {
-this.values = ImmutableList.copyOf(values);
+this.values = values;
 }
 
 @Override



git commit: PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list object.

2014-09-25 Thread larsh
Repository: phoenix
Updated Branches:
  refs/heads/master 940b70d65 -> ea5a797eb


PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list 
object.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ea5a797e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ea5a797e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ea5a797e

Branch: refs/heads/master
Commit: ea5a797ebcd611670c5400aa7aaae3b3b414c43d
Parents: 940b70d
Author: Lars Hofhansl 
Authored: Thu Sep 25 08:43:33 2014 -0700
Committer: Lars Hofhansl 
Committed: Thu Sep 25 08:43:33 2014 -0700

--
 .../java/org/apache/phoenix/coprocessor/ScanRegionObserver.java | 4 +++-
 .../org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java | 5 ++---
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ea5a797e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 548aadb..8c72dd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -63,6 +63,7 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 
@@ -405,7 +406,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
 
 private void replaceArrayIndexElement(final Set arrayKVRefs,
 final Expression[] arrayFuncRefs, List result) {
-MultiKeyValueTuple tuple = new MultiKeyValueTuple(result);
+// make a copy of the results array here, as we're modifying it below
+MultiKeyValueTuple tuple = new MultiKeyValueTuple(ImmutableList.copyOf(result));
 // The size of both the arrays would be same?
 // Using KeyValueSchema to set and retrieve the value
 // collect the first kv to get the row

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ea5a797e/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
index 255c54e..53f155b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.util.KeyValueUtil;
 
-import com.google.common.collect.ImmutableList;
-
 
 public class MultiKeyValueTuple extends BaseTuple {
 private List values;
@@ -37,8 +35,9 @@ public class MultiKeyValueTuple extends BaseTuple {
 public MultiKeyValueTuple() {
 }
 
+/** Caller must not modify the list that is passed here */
 public void setKeyValues(List values) {
-this.values = ImmutableList.copyOf(values);
+this.values = values;
 }
 
 @Override



git commit: PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list object.

2014-09-25 Thread larsh
Repository: phoenix
Updated Branches:
  refs/heads/4.0 b2bf3f523 -> 349d04fda


PHOENIX-1281 Each MultiKeyValueTuple.setKeyValues creates a new immutable list 
object.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/349d04fd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/349d04fd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/349d04fd

Branch: refs/heads/4.0
Commit: 349d04fda90e5d0e0a080c6ca46693c890dc1046
Parents: b2bf3f5
Author: Lars Hofhansl 
Authored: Thu Sep 25 08:43:33 2014 -0700
Committer: Lars Hofhansl 
Committed: Thu Sep 25 08:43:53 2014 -0700

--
 .../java/org/apache/phoenix/coprocessor/ScanRegionObserver.java | 4 +++-
 .../org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java | 5 ++---
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/349d04fd/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 548aadb..8c72dd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -63,6 +63,7 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 
@@ -405,7 +406,8 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
 
 private void replaceArrayIndexElement(final Set arrayKVRefs,
 final Expression[] arrayFuncRefs, List result) {
-MultiKeyValueTuple tuple = new MultiKeyValueTuple(result);
+// make a copy of the results array here, as we're modifying it below
+MultiKeyValueTuple tuple = new MultiKeyValueTuple(ImmutableList.copyOf(result));
 // The size of both the arrays would be same?
 // Using KeyValueSchema to set and retrieve the value
 // collect the first kv to get the row

http://git-wip-us.apache.org/repos/asf/phoenix/blob/349d04fd/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
index 255c54e..53f155b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.util.KeyValueUtil;
 
-import com.google.common.collect.ImmutableList;
-
 
 public class MultiKeyValueTuple extends BaseTuple {
 private List values;
@@ -37,8 +35,9 @@ public class MultiKeyValueTuple extends BaseTuple {
 public MultiKeyValueTuple() {
 }
 
+/** Caller must not modify the list that is passed here */
 public void setKeyValues(List values) {
-this.values = ImmutableList.copyOf(values);
+this.values = values;
 }
 
 @Override



Apache-Phoenix | 3.0 | Hadoop1 | Build Successful

2014-09-25 Thread Apache Jenkins Server
3.0 branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastCompletedBuild/testReport/

Changes
[ramkrishna] Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is



Apache-Phoenix | 4.0 | Hadoop1 | Build Successful

2014-09-25 Thread Apache Jenkins Server
4.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastCompletedBuild/testReport/

Changes
[ramkrishna] Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is



Build failed in Jenkins: Phoenix | Master | Hadoop1 #382

2014-09-25 Thread Apache Jenkins Server
See 

Changes:

[ramkrishna] Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG 
is

--
[...truncated 807 lines...]
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 4 bytes.
at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
... 8 more

at java.util.concurrent.FutureTask$Sync.innerGet(FutureTask.java:262)
at java.util.concurrent.FutureTask.get(FutureTask.java:119)
at org.apache.phoenix.iterate.ParallelIterators.getIterators(ParallelIterators.java:280)
at org.apache.phoenix.iterate.MergeSortResultIterator.getIterators(MergeSortResultIterator.java:48)
at org.apache.phoenix.iterate.MergeSortResultIterator.minIterator(MergeSortResultIterator.java:63)
at org.apache.phoenix.iterate.MergeSortResultIterator.next(MergeSortResultIterator.java:90)
at org.apache.phoenix.iterate.MergeSortTopNResultIterator.next(MergeSortTopNResultIterator.java:87)
at org.apache.phoenix.jdbc.PhoenixResultSet.next(PhoenixResultSet.java:732)
at org.apache.phoenix.end2end.SortOrderFIT.runQuery(SortOrderFIT.java:396)
at org.apache.phoenix.end2end.SortOrderFIT.runQueryTest(SortOrderFIT.java:353)
at org.apache.phoenix.end2end.SortOrderFIT.queryDescDateWithExplicitOrderBy(SortOrderFIT.java:251)
Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1411642454947.35d84ba17616bad89a1ed754e96608e6.: Requested memory of 104000 bytes is larger than global pool of 4 bytes.
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 4 bytes.
at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
... 8 more

at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:101)
at org.apache.phoenix.iterate.TableResultIterator.&lt;init&gt;(TableResultIterator.java:57)
at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
at java.util.concurren

git commit: Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is already present

2014-09-25 Thread ramkrishna
Repository: phoenix
Updated Branches:
  refs/heads/3.0 7ebb87e7d -> 87fe59e5b


Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is
already present


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87fe59e5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87fe59e5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87fe59e5

Branch: refs/heads/3.0
Commit: 87fe59e5bca692397771e4fdfe298e90eff49384
Parents: 7ebb87e
Author: Ramkrishna 
Authored: Thu Sep 25 16:21:00 2014 +0530
Committer: Ramkrishna 
Committed: Thu Sep 25 16:21:00 2014 +0530

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--
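
The fix applies a create-then-tolerate-exists idiom: unconditionally issue the CREATE for SYSTEM.STATS during connection initialization and treat the already-exists outcomes as success, so clusters bootstrapped before the stats table existed still pick it up on upgrade. A hedged JDBC sketch of the same idiom; the DDL is a simplified stand-in for QueryConstants.CREATE_STATS_TABLE_METADATA, and 1013 is assumed to be Phoenix's TABLE_ALREADY_EXIST error code.

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    final class EnsureStatsTable {
        static void ensure(Connection conn) throws SQLException {
            try (Statement stmt = conn.createStatement()) {
                // Simplified stand-in for the real SYSTEM.STATS DDL.
                stmt.executeUpdate(
                    "CREATE TABLE SYSTEM.STATS (k VARCHAR PRIMARY KEY, v VARCHAR)");
            } catch (SQLException e) {
                // Assumed error code: already existing is success for an
                // idempotent bootstrap step; anything else is a real failure.
                if (e.getErrorCode() != 1013) {
                    throw e;
                }
            }
        }
    }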


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87fe59e5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 0676e86..926407a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1295,9 +1295,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 ConnectionQueryServicesImpl.this, url, scnProps, newEmptyMetaData());
 try {
 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
-// TODO : Get this from a configuration
-metaConnection.createStatement().executeUpdate(
-QueryConstants.CREATE_STATS_TABLE_METADATA);
 } catch (NewerTableAlreadyExistsException ignore) {
 // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed timestamp.
 // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
@@ -1326,6 +1323,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME,
 MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, newColumns);
 }
+try {
+metaConnection.createStatement().executeUpdate(
+QueryConstants.CREATE_STATS_TABLE_METADATA);
+} catch (NewerTableAlreadyExistsException ignore) {
+
+} catch (TableAlreadyExistsException ignore) {
+
+}
 } catch (Exception e) {
 if (e instanceof SQLException) {
 initializationException = (SQLException)e;



git commit: Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is already present

2014-09-25 Thread ramkrishna
Repository: phoenix
Updated Branches:
  refs/heads/4.0 1110aeddf -> b2bf3f523


Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is
already present


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2bf3f52
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2bf3f52
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2bf3f52

Branch: refs/heads/4.0
Commit: b2bf3f5230666f311d23910dcb9abf17f615e868
Parents: 1110aed
Author: Ramkrishna 
Authored: Thu Sep 25 16:04:32 2014 +0530
Committer: Ramkrishna 
Committed: Thu Sep 25 16:04:32 2014 +0530

--
 .../query/ConnectionQueryServicesImpl.java  | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2bf3f52/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 987d200..a0fb614 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.query;
 
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CYCLE_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 
@@ -1518,12 +1514,28 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 }
 try {
 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_SEQUENCE_METADATA);
-
-// TODO : Get this from a configuration
+} catch (NewerTableAlreadyExistsException ignore) {
+// Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
+// A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
+
+} catch (TableAlreadyExistsException ignore) {
+// This will occur if we have an older SYSTEM.SEQUENCE, so we need to update it to include
+// any new columns we've added.
+String newColumns = PhoenixDatabaseMetaData.MIN_VALUE + " " + PDataType.LONG.getSqlTypeName() + ", "
++ PhoenixDatabaseMetaData.MAX_VALUE + " " + PDataType.LONG.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " "
++ PDataType.BOOLEAN.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " "
++ PDataType.BOOLEAN.getSqlTypeName();
+metaConnection = addColumnsIfNotExists(metaConnection,
+PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME,
+MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, newColumns);
+}
+try {
 metaConnection.createStatement().executeUpdate(
 QueryConstants.CREATE_STATS_TABLE_METADATA);
 } catch (NewerTableAlreadyExistsException ignore) {
-} catch (TableAlreadyExistsException ignore) {
+
+} catch(TableAlreadyExistsException ignore) {
+
 }
 } catch (Exception e) {
 if (e instanceof SQLException) {



git commit: Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is already present

2014-09-25 Thread ramkrishna
Repository: phoenix
Updated Branches:
  refs/heads/master 6908c90b5 -> 940b70d65


Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is
already present


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/940b70d6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/940b70d6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/940b70d6

Branch: refs/heads/master
Commit: 940b70d65495e93c67323ef93eca8191fecdfe98
Parents: 6908c90
Author: Ramkrishna 
Authored: Thu Sep 25 16:03:05 2014 +0530
Committer: Ramkrishna 
Committed: Thu Sep 25 16:03:05 2014 +0530

--
 .../query/ConnectionQueryServicesImpl.java  | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/940b70d6/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 2512178..dfd56bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.query;
 
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CYCLE_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 
@@ -1518,12 +1514,28 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 }
 try {
 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_SEQUENCE_METADATA);
-
-// TODO : Get this from a configuration
+} catch (NewerTableAlreadyExistsException ignore) {
+// Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp.
+// A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
+
+} catch (TableAlreadyExistsException ignore) {
+// This will occur if we have an older SYSTEM.SEQUENCE, so we need to update it to include
+// any new columns we've added.
+String newColumns = PhoenixDatabaseMetaData.MIN_VALUE + " " + PDataType.LONG.getSqlTypeName() + ", "
++ PhoenixDatabaseMetaData.MAX_VALUE + " " + PDataType.LONG.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.CYCLE_FLAG + " "
++ PDataType.BOOLEAN.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " "
++ PDataType.BOOLEAN.getSqlTypeName();
+metaConnection = addColumnsIfNotExists(metaConnection,
+PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME,
+MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, newColumns);
+}
+try {
 metaConnection.createStatement().executeUpdate(
 QueryConstants.CREATE_STATS_TABLE_METADATA);
 } catch (NewerTableAlreadyExistsException ignore) {
-} catch (TableAlreadyExistsException ignore) {
+
+} catch(TableAlreadyExistsException ignore) {
+
 }
 } catch (Exception e) {
 if (e instanceof SQLException) {