[2/3] PHOENIX-177: Collect usage and performance metrics

2014-07-28 Thread jyates
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index e24591d..b3bec6e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.builder.IndexBuildManager;
 import org.apache.phoenix.hbase.index.builder.IndexBuilder;
@@ -64,6 +65,11 @@ import org.apache.phoenix.hbase.index.write.IndexWriter;
 import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import 
org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import 
org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
+import org.apache.phoenix.trace.TracingCompat;
+import org.apache.phoenix.trace.util.NullSpan;
+import org.apache.phoenix.trace.util.Tracing;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
 
 import com.google.common.collect.Multimap;
 
@@ -134,10 +140,18 @@ public class Indexer extends BaseRegionObserver {
 private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = VersionUtil
 .encodeVersion("0.94.9");
 
+/**
+ * Raw configuration, for tracing. Coprocessors generally will get a 
subset configuration (if
+ * they are on a per-table basis), so we need the raw one from the server, 
so we can get the
+ * actual configuration keys
+ */
+private Configuration rawConf;
+
   @Override
   public void start(CoprocessorEnvironment e) throws IOException {
   try {
 final RegionCoprocessorEnvironment env = 
(RegionCoprocessorEnvironment) e;
+this.rawConf = env.getRegionServerServices().getConfiguration();
 String serverName = 
env.getRegionServerServices().getServerName().getServerName();
 if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
   // make sure the right version - combinations are allowed.
@@ -312,12 +326,24 @@ public class Indexer extends BaseRegionObserver {
 // don't worry which one we get
 WALEdit edit = miniBatchOp.getWalEdit(0);
 
+// get the current span, or just use a null-span to avoid a bunch of 
if statements
+Span current = Trace.startSpan("Starting to build index updates").getSpan();
+if (current == null) {
+current = NullSpan.INSTANCE;
+}
+
 // get the index updates for all elements in this batch
 Collection&lt;Pair&lt;Mutation, byte[]&gt;&gt; indexUpdates =
 this.builder.getIndexUpdate(miniBatchOp, mutations.values());
 
+current.addTimelineAnnotation("Built index updates, doing preStep");
+TracingCompat.addAnnotation(current, "index update count", indexUpdates.size());
+
 // write them, either to WAL or the index tables
 doPre(indexUpdates, edit, durability);
+
+// close the span
+current.stop();
   }
 
   private class MultiMutation extends Mutation {
@@ -458,16 +484,24 @@ public class Indexer extends BaseRegionObserver {
   return;
 }
 
+// get the current span, or just use a null-span to avoid a bunch of 
if statements
+Span current = Trace.startSpan("Completing index writes").getSpan();
+if (current == null) {
+current = NullSpan.INSTANCE;
+}
+
 // there is a little bit of excess here- we iterate all the non-indexed 
kvs for this check first
 // and then do it again later when getting out the index updates. This 
should be pretty minor
 // though, compared to the rest of the runtime
 IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
+
 /*
  * early exit - we have nothing to write, so we don't need to do anything 
else. NOTE: we don't
  * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it 
in doPre if there are
  * no index updates.
  */
 if (ikv == null) {
+current.stop();
   return;
 }
 
@@ -483,6 +517,7 @@ public class Indexer extends BaseRegionObserver {
   // references originally - therefore, we just pass in a null factory 
here and use the ones
   // already specified on each reference
   try {
+current.addTimelineAnnotation("Actually doing index update for first time");
   writer.writeAndKillYourselfOnFailure(indexUpdates);
   } finally {
 // With a custom kill 

[2/3] PHOENIX-177: Collect usage and performance metrics

2014-07-28 Thread jyates
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e8def027/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index e24591d..b3bec6e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.builder.IndexBuildManager;
 import org.apache.phoenix.hbase.index.builder.IndexBuilder;
@@ -64,6 +65,11 @@ import org.apache.phoenix.hbase.index.write.IndexWriter;
 import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import 
org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import 
org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
+import org.apache.phoenix.trace.TracingCompat;
+import org.apache.phoenix.trace.util.NullSpan;
+import org.apache.phoenix.trace.util.Tracing;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
 
 import com.google.common.collect.Multimap;
 
@@ -134,10 +140,18 @@ public class Indexer extends BaseRegionObserver {
 private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = VersionUtil
 .encodeVersion("0.94.9");
 
+/**
+ * Raw configuration, for tracing. Coprocessors generally will get a 
subset configuration (if
+ * they are on a per-table basis), so we need the raw one from the server, 
so we can get the
+ * actual configuration keys
+ */
+private Configuration rawConf;
+
   @Override
   public void start(CoprocessorEnvironment e) throws IOException {
   try {
 final RegionCoprocessorEnvironment env = 
(RegionCoprocessorEnvironment) e;
+this.rawConf = env.getRegionServerServices().getConfiguration();
 String serverName = 
env.getRegionServerServices().getServerName().getServerName();
 if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
   // make sure the right version - combinations are allowed.
@@ -312,12 +326,24 @@ public class Indexer extends BaseRegionObserver {
 // don't worry which one we get
 WALEdit edit = miniBatchOp.getWalEdit(0);
 
+// get the current span, or just use a null-span to avoid a bunch of 
if statements
+Span current = Trace.startSpan("Starting to build index updates").getSpan();
+if (current == null) {
+current = NullSpan.INSTANCE;
+}
+
 // get the index updates for all elements in this batch
 Collection&lt;Pair&lt;Mutation, byte[]&gt;&gt; indexUpdates =
 this.builder.getIndexUpdate(miniBatchOp, mutations.values());
 
+current.addTimelineAnnotation("Built index updates, doing preStep");
+TracingCompat.addAnnotation(current, "index update count", indexUpdates.size());
+
 // write them, either to WAL or the index tables
 doPre(indexUpdates, edit, durability);
+
+// close the span
+current.stop();
   }
 
   private class MultiMutation extends Mutation {
@@ -458,16 +484,24 @@ public class Indexer extends BaseRegionObserver {
   return;
 }
 
+// get the current span, or just use a null-span to avoid a bunch of 
if statements
+Span current = Trace.startSpan("Completing index writes").getSpan();
+if (current == null) {
+current = NullSpan.INSTANCE;
+}
+
 // there is a little bit of excess here- we iterate all the non-indexed 
kvs for this check first
 // and then do it again later when getting out the index updates. This 
should be pretty minor
 // though, compared to the rest of the runtime
 IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
+
 /*
  * early exit - we have nothing to write, so we don't need to do anything 
else. NOTE: we don't
  * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it 
in doPre if there are
  * no index updates.
  */
 if (ikv == null) {
+current.stop();
   return;
 }
 
@@ -483,6 +517,7 @@ public class Indexer extends BaseRegionObserver {
   // references originally - therefore, we just pass in a null factory 
here and use the ones
   // already specified on each reference
   try {
+current.addTimelineAnnotation("Actually doing index update for first time");
   writer.writeAndKillYourselfOnFailure(indexUpdates);
   } finally {
 // With a custom kill 

[3/3] git commit: PHOENIX-177: Collect usage and performance metrics

2014-07-28 Thread jyates
PHOENIX-177: Collect usage and performance metrics

Add basic Dapper-like tracing (using Cloudera's HTrace library)
to phoenix requests. This is the basic infrastructure to
support more holistic, non-profiler based analysis.

This patch includes, among other things, the
infrastructure to use HTrace, async-tracing handling
via the Hadoop metrics2 framework, and trace read/write
to a phoenix table.

Currently, do NOT support Hadoop1 (though does work
against Hadoop1).

Default builds to hadoop2, rather than hadoop1 (particularly as hadoop1 is
now a second-class citizen).


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b7f46c10
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b7f46c10
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b7f46c10

Branch: refs/heads/master
Commit: b7f46c1051de3e23630dccb82677a0a16985f27c
Parents: 9185f76
Author: Jesse Yates jya...@apache.org
Authored: Fri Jun 6 16:11:32 2014 -0700
Committer: Jesse Yates jya...@apache.org
Committed: Mon Jul 28 06:37:49 2014 -0700

--
 phoenix-core/pom.xml|  47 +--
 .../apache/phoenix/trace/BaseTracingTestIT.java | 117 ++
 .../phoenix/trace/DelegatingConnection.java | 328 +++
 .../phoenix/trace/DisableableMetricsWriter.java |  83 
 .../trace/Hadoop1TracingTestEnabler.java|  86 
 .../apache/phoenix/trace/PhoenixMetricImpl.java |  44 ++
 .../phoenix/trace/PhoenixMetricRecordImpl.java  |  71 
 .../trace/PhoenixTableMetricsWriterIT.java  | 119 ++
 .../apache/phoenix/trace/PhoenixTagImpl.java|  52 +++
 .../phoenix/trace/PhoenixTracingEndToEndIT.java | 401 +++
 .../apache/phoenix/trace/TraceReaderTest.java   | 181 +
 .../org/apache/phoenix/call/CallRunner.java |  66 +++
 .../org/apache/phoenix/call/CallWrapper.java|  29 ++
 .../coprocessor/BaseScannerRegionObserver.java  |  36 +-
 .../coprocessor/DelegateRegionScanner.java  |  78 
 .../apache/phoenix/execute/BasicQueryPlan.java  |  17 +-
 .../apache/phoenix/execute/MutationState.java   |  24 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  39 ++
 .../phoenix/iterate/ParallelIterators.java  |   5 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  38 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  25 +-
 .../trace/PhoenixTableMetricsWriter.java| 255 
 .../org/apache/phoenix/trace/TraceReader.java   | 375 +
 .../apache/phoenix/trace/TracingIterator.java   |  58 +++
 .../trace/util/ConfigurationAdapter.java|  56 +++
 .../org/apache/phoenix/trace/util/NullSpan.java | 112 ++
 .../org/apache/phoenix/trace/util/Tracing.java  | 282 +
 .../phoenix/util/PhoenixContextExecutor.java|  23 ++
 .../java/org/apache/phoenix/util/QueryUtil.java |  29 ++
 .../test/resources/hadoop-metrics2.properties   |  25 ++
 .../src/test/resources/log4j.properties |   4 +-
 phoenix-hadoop-compat/pom.xml   |  31 +-
 .../org/apache/phoenix/metrics/MetricInfo.java  |  51 +++
 .../org/apache/phoenix/metrics/Metrics.java |  39 ++
 .../apache/phoenix/metrics/MetricsManager.java  |  58 +++
 .../apache/phoenix/metrics/MetricsWriter.java   |  31 ++
 .../phoenix/metrics/PhoenixAbstractMetric.java  |  30 ++
 .../phoenix/metrics/PhoenixMetricTag.java   |  27 ++
 .../phoenix/metrics/PhoenixMetricsRecord.java   |  35 ++
 .../phoenix/trace/PhoenixSpanReceiver.java  |  26 ++
 .../phoenix/trace/TestableMetricsWriter.java|  30 ++
 .../org/apache/phoenix/trace/TracingCompat.java |  94 +
 .../org/apache/phoenix/metrics/LoggingSink.java |  56 +++
 .../phoenix/metrics/TracingTestCompat.java  |  45 +++
 phoenix-hadoop2-compat/pom.xml  |  47 ++-
 .../phoenix/metrics/MetricsManagerImpl.java |  71 
 .../apache/phoenix/trace/MetricsInfoImpl.java   |  63 +++
 .../phoenix/trace/PhoenixMetricsWriter.java | 176 
 .../apache/phoenix/trace/TraceMetricSource.java | 192 +
 .../org.apache.phoenix.metrics.MetricsManager   |   1 +
 ...org.apache.phoenix.trace.PhoenixSpanReceiver |   1 +
 ...g.apache.phoenix.trace.TestableMetricsWriter |   1 +
 .../metrics2/impl/ExposedMetricCounterLong.java |  35 ++
 .../metrics2/impl/ExposedMetricsRecordImpl.java |  43 ++
 .../metrics2/lib/ExposedMetricsInfoImpl.java|  32 ++
 .../phoenix/trace/PhoenixMetricsWriterTest.java | 142 +++
 .../org/apache/phoenix/trace/TracingTest.java   |  34 ++
 pom.xml |  40 +-
 58 files changed, 4479 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f46c10/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml

Build failed in Jenkins: Phoenix | Master | Hadoop1 #300

2014-07-28 Thread Apache Jenkins Server
See https://builds.apache.org/job/Phoenix-master-hadoop1/300/changes

Changes:

[jyates] PHOENIX-177: Collect usage and performance metrics

--
[...truncated 4218 lines...]
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:771)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:742)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:711)
at org.apache.phoenix.query.BaseTest.initMiniCluster(BaseTest.java:480)
at org.apache.phoenix.query.BaseTest.setUpTestCluster(BaseTest.java:455)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.checkClusterInitialized(BaseHBaseManagedTimeIT.java:87)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.getUrl(BaseHBaseManagedTimeIT.java:58)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.doSetup(BaseHBaseManagedTimeIT.java:73)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
at org.junit.runner.JUnitCore.run(JUnitCore.java:138)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.createRequestAndRun(JUnitCoreWrapper.java:113)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.executeLazy(JUnitCoreWrapper.java:94)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:58)
at 
org.apache.maven.surefire.junitcore.JUnitCoreProvider.invoke(JUnitCoreProvider.java:134)
at 
org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
at 
org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
at 
org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)

org.apache.phoenix.flume.RegexEventSerializerIT  Time elapsed: 2,101,478.694 
sec   FAILURE!
java.lang.AssertionError: null
at org.junit.Assert.fail(Assert.java:86)
at org.junit.Assert.assertTrue(Assert.java:41)
at org.junit.Assert.assertTrue(Assert.java:52)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.dropTables(BaseHBaseManagedTimeIT.java:99)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at 

git commit: PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from LONG/UNSIGNED_LONG

2014-07-28 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 3640902c3 - 9c85887af


PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from LONG/UNSIGNED_LONG


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9c85887a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9c85887a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9c85887a

Branch: refs/heads/3.0
Commit: 9c85887af4d48a7bac45171ddd2a33fbd310a440
Parents: 3640902
Author: James Taylor jtay...@salesforce.com
Authored: Mon Jul 28 11:48:51 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Mon Jul 28 11:48:51 2014 -0700

--
 .../org/apache/phoenix/end2end/BaseQueryIT.java | 196 +++
 .../apache/phoenix/end2end/CaseStatementIT.java |   2 +-
 .../apache/phoenix/end2end/CastAndCoerceIT.java |  98 +-
 .../end2end/ClientTimeArithmeticQueryIT.java|   2 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   |   2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   2 +-
 .../org/apache/phoenix/end2end/QueryIT.java | 149 +-
 .../org/apache/phoenix/end2end/ScanQueryIT.java |   2 +-
 .../function/CeilDecimalExpression.java |  17 +-
 .../function/FloorDecimalExpression.java|  17 +-
 .../function/RoundDecimalExpression.java|  24 ++-
 .../org/apache/phoenix/parse/CastParseNode.java |  18 +-
 .../org/apache/phoenix/parse/CeilParseNode.java |   2 +-
 .../apache/phoenix/parse/FloorParseNode.java|   2 +-
 .../apache/phoenix/parse/RoundParseNode.java|   6 +-
 .../org/apache/phoenix/schema/PDataType.java|  15 ++
 .../RoundFloorCeilExpressionsUnitTests.java |   5 +-
 17 files changed, 383 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c85887a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
new file mode 100644
index 000..f87e86c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+
+
+/**
+ * 
+ * Basic tests for Phoenix JDBC implementation
+ *
+ * 
+ * @since 0.1
+ */
+
+@Category(ClientManagedTimeTest.class)
+@RunWith(Parameterized.class)
+public abstract class BaseQueryIT extends BaseClientManagedTimeIT {
+protected static final String tenantId = getOrganizationId();
+protected static final String 

git commit: PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from LONG/UNSIGNED_LONG

2014-07-28 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 e8def027a - d372c6591


PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from LONG/UNSIGNED_LONG


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d372c659
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d372c659
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d372c659

Branch: refs/heads/4.0
Commit: d372c6591d22e7240ef900cd14160be80ef2f73b
Parents: e8def02
Author: James Taylor jtay...@salesforce.com
Authored: Mon Jul 28 11:48:51 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Mon Jul 28 12:41:40 2014 -0700

--
 .../org/apache/phoenix/end2end/BaseQueryIT.java | 138 +++
 .../apache/phoenix/end2end/CaseStatementIT.java |   2 +-
 .../apache/phoenix/end2end/CastAndCoerceIT.java |  98 -
 .../end2end/ClientTimeArithmeticQueryIT.java|   2 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   |   2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   2 +-
 .../org/apache/phoenix/end2end/QueryIT.java |  96 +
 .../org/apache/phoenix/end2end/ScanQueryIT.java |   2 +-
 .../function/CeilDecimalExpression.java |  17 ++-
 .../function/FloorDecimalExpression.java|  17 ++-
 .../function/RoundDecimalExpression.java|  24 +++-
 .../org/apache/phoenix/parse/CastParseNode.java |  18 ++-
 .../org/apache/phoenix/parse/CeilParseNode.java |   2 +-
 .../apache/phoenix/parse/FloorParseNode.java|   2 +-
 .../apache/phoenix/parse/RoundParseNode.java|   6 +-
 .../org/apache/phoenix/schema/PDataType.java|  15 ++
 .../RoundFloorCeilExpressionsUnitTests.java |   5 +-
 17 files changed, 325 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d372c659/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
new file mode 100644
index 000..d736612
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+
+
+/**
+ * 
+ * Basic tests for Phoenix JDBC implementation
+ *
+ * 
+ * @since 0.1
+ */
+
+@Category(ClientManagedTimeTest.class)
+@RunWith(Parameterized.class)
+public abstract class BaseQueryIT extends BaseClientManagedTimeIT {
+protected static final String tenantId = getOrganizationId();
+protected static final String ATABLE_INDEX_NAME = "ATABLE_IDX";
+protected static final long BATCH_SIZE = 3;
+
+@BeforeClass
+@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
+public static void 

Apache-Phoenix | 3.0 | Hadoop1 | Build Successful

2014-07-28 Thread Apache Jenkins Server
3.0 branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from LONG/UNSIGNED_LONG



git commit: New test to repro skip scan after manual splits

2014-07-28 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 9c85887af - 507476b9b


New test to repro skip scan after manual splits


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/507476b9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/507476b9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/507476b9

Branch: refs/heads/3.0
Commit: 507476b9bcf4c2f2edb2d84690508c0e45788818
Parents: 9c85887
Author: James Taylor jtay...@salesforce.com
Authored: Mon Jul 28 13:00:23 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Mon Jul 28 13:00:23 2014 -0700

--
 .../end2end/SkipScanAfterManualSplit.java   | 242 +++
 1 file changed, 242 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/507476b9/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplit.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplit.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplit.java
new file mode 100644
index 000..99405f2
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplit.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+@Category(HBaseManagedTimeTest.class)
+public class SkipScanAfterManualSplit extends BaseHBaseManagedTimeIT {
+
+private static final int BATCH_SIZE = 25;
+private static final int MAX_FILESIZE = 1024 * 10;
+private static final int PAYLOAD_SIZE = 1024;
+private static final String PAYLOAD;
+static {
+StringBuilder buf = new StringBuilder();
+for (int i = 0; i < PAYLOAD_SIZE; i++) {
+buf.append('a');
+}
+PAYLOAD = buf.toString();
+}
+//private static final String SPLIT_POINT = "j";
+private static final String TABLE_NAME = "S";
+private static final byte[] TABLE_NAME_BYTES = Bytes.toBytes(TABLE_NAME);
+private static final int MIN_CHAR = 'a';
+private static final int MAX_CHAR = 'z';
+//private static final int PERC_TO_SELECT = 4;
+private static final Random RAND = new Random();
+
+@BeforeClass
+@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
+public static void doSetup() throws Exception {
+Map<String,String> props = Maps.newHashMapWithExpectedSize(2);
+props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(32));
+props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1000));
+setUpTestDriver(getUrl(), new 
ReadOnlyProps(props.entrySet().iterator()));
+}
+
+private static void initTable() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+

Build failed in Jenkins: Phoenix | 4.0 | Hadoop1 #234

2014-07-28 Thread Apache Jenkins Server
See https://builds.apache.org/job/Phoenix-4.0-hadoop1/234/changes

Changes:

[jtaylor] PHOENIX-788 Support cast from/to DATE/TIME/TIMESTAMP to/from 
LONG/UNSIGNED_LONG

--
[...truncated 796 lines...]
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:846)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:771)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:742)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:711)
at org.apache.phoenix.query.BaseTest.initMiniCluster(BaseTest.java:480)
at org.apache.phoenix.query.BaseTest.setUpTestCluster(BaseTest.java:455)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.checkClusterInitialized(BaseHBaseManagedTimeIT.java:87)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.getUrl(BaseHBaseManagedTimeIT.java:58)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.doSetup(BaseHBaseManagedTimeIT.java:73)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
at org.junit.runner.JUnitCore.run(JUnitCore.java:138)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.createRequestAndRun(JUnitCoreWrapper.java:113)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.executeLazy(JUnitCoreWrapper.java:94)
at 
org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:58)
at 
org.apache.maven.surefire.junitcore.JUnitCoreProvider.invoke(JUnitCoreProvider.java:134)
at 
org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
at 
org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
at 
org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)

org.apache.phoenix.flume.RegexEventSerializerIT  Time elapsed: 2,122,991.545 
sec   FAILURE!
java.lang.AssertionError: null
at org.junit.Assert.fail(Assert.java:86)
at org.junit.Assert.assertTrue(Assert.java:41)
at org.junit.Assert.assertTrue(Assert.java:52)
at 
org.apache.phoenix.end2end.BaseHBaseManagedTimeIT.dropTables(BaseHBaseManagedTimeIT.java:99)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at org.junit.runners.Suite.runChild(Suite.java:127)
at org.junit.runners.Suite.runChild(Suite.java:26)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at 

Apache-Phoenix | 3.0 | Hadoop1 | Build Successful

2014-07-28 Thread Apache Jenkins Server
3.0 branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf/phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-3.0-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] New test to repro skip scan after manual splits



Build failed in Jenkins: Phoenix | 3.0 | Hadoop1 #162

2014-07-28 Thread Apache Jenkins Server
See https://builds.apache.org/job/Phoenix-3.0-hadoop1/162/changes

Changes:

[jtaylor] New test to repro skip scan after manual splits

[jtaylor] New test to repro skip scan after manual splits

--
[...truncated 1 lines...]
  
QueryDatabaseMetaDataITBaseClientManagedTimeIT.doSetup:75-BaseClientManagedTimeIT.getUrl:59-BaseClientManagedTimeIT.checkClusterInitialized:89-BaseTest.setUpTestCluster:449-BaseTest.initMiniCluster:491
 ? Runtime
  
GroupByCaseITBaseClientManagedTimeIT.doSetup:75-BaseClientManagedTimeIT.getUrl:59-BaseClientManagedTimeIT.checkClusterInitialized:89-BaseTest.setUpTestCluster:449-BaseTest.initMiniCluster:491
 ? Runtime
  
ScanQueryITBaseQueryIT.doSetup:89-BaseClientManagedTimeIT.getUrl:59-BaseClientManagedTimeIT.checkClusterInitialized:89-BaseTest.setUpTestCluster:449-BaseTest.initMiniCluster:491
 ? Runtime

Tests run: 97, Failures: 48, Errors: 49, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] Failsafe report directory: 
https://builds.apache.org/job/Phoenix-3.0-hadoop1/ws/phoenix-core/target/failsafe-reports
[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, 
useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, 
threadCountMethods=0, parallelOptimized=true

---
 T E S T S
---

---
 T E S T S
---
Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 2,124,572.003 
sec  FAILURE! - in org.apache.phoenix.end2end.ContextClassloaderIT
org.apache.phoenix.end2end.ContextClassloaderIT  Time elapsed: 2,124,572.002 
sec   ERROR!
java.net.UnknownHostException: asf901.ygridcore.net: asf901.ygridcore.net: Name 
or service not known
at java.net.Inet6AddressImpl.lookupAllHostAddr(Native Method)
at java.net.InetAddress$1.lookupAllHostAddr(InetAddress.java:894)
at 
java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1286)
at java.net.InetAddress.getLocalHost(InetAddress.java:1462)
at 
org.apache.hadoop.security.SecurityUtil.getLocalHostName(SecurityUtil.java:252)
at org.apache.hadoop.security.SecurityUtil.login(SecurityUtil.java:270)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.init(DataNode.java:291)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1582)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1521)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1496)
at 
org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:417)
at org.apache.hadoop.hdfs.MiniDFSCluster.init(MiniDFSCluster.java:280)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniDFSCluster(HBaseTestingUtility.java:452)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:620)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:576)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:545)
at 
org.apache.phoenix.end2end.ContextClassloaderIT.setUpBeforeClass(ContextClassloaderIT.java:62)

Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 2,124,572.144 
sec  FAILURE! - in 
org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT  
Time elapsed: 2,124,572.143 sec   ERROR!
java.net.UnknownHostException: asf901.ygridcore.net: asf901.ygridcore.net: Name 
or service not known
at java.net.Inet6AddressImpl.lookupAllHostAddr(Native Method)
at java.net.InetAddress$1.lookupAllHostAddr(InetAddress.java:894)
at 
java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1286)
at java.net.InetAddress.getLocalHost(InetAddress.java:1462)
at 
org.apache.hadoop.security.SecurityUtil.getLocalHostName(SecurityUtil.java:252)
at org.apache.hadoop.security.SecurityUtil.login(SecurityUtil.java:270)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.init(DataNode.java:291)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1582)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1521)
at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1496)
at 
org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:417)
at org.apache.hadoop.hdfs.MiniDFSCluster.init(MiniDFSCluster.java:280)
at 
org.apache.hadoop.hbase.HBaseTestingUtility.startMiniDFSCluster(HBaseTestingUtility.java:452)
   

git commit: Simplify SkipScanAfterManualSplitIT to only use two keys that fail

2014-07-28 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/3.0 794ab41f6 -> ef27a9f56


Simplify SkipScanAfterManualSplitIT to only use two keys that fail


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ef27a9f5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ef27a9f5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ef27a9f5

Branch: refs/heads/3.0
Commit: ef27a9f56e6bca2f20a87c51089136841942adca
Parents: 794ab41
Author: James Taylor jtay...@salesforce.com
Authored: Mon Jul 28 14:42:25 2014 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Mon Jul 28 14:42:25 2014 -0700

--
 .../org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ef27a9f5/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index 3daef19..4fdf4c5 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -111,7 +111,7 @@ public class SkipScanAfterManualSplitIT extends 
BaseHBaseManagedTimeIT {
 }
 System.out.println("Region boundaries:\n" + boundaries);
 }
-
+
 @Ignore
 @Test
 public void testManualSplit() throws Exception {
@@ -134,8 +134,8 @@ public class SkipScanAfterManualSplitIT extends 
BaseHBaseManagedTimeIT {
 // table is accessed
 assertEquals(nRegions, nInitialRegions);
 
-int nRows = 25;
+String query = "SELECT count(*) FROM S WHERE a IN 
('tl','jt','ju','rj','hj','vt','hh','br','ga','vn','th','sv','dl','mj','is','op','ug','sq','mv','qe','kq','xy','ek','aa','ae')";
+int nRows = 2;
+String query = "SELECT count(*) FROM S WHERE a IN ('tl','jt')";
 ResultSet rs1 = conn.createStatement().executeQuery(query);
 assertTrue(rs1.next());
 traceRegionBoundaries(services);