Apache-Phoenix | Master | Hadoop1 | Build Successful

2014-10-05, Apache Jenkins Server
Master branch build status: Successful
Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master-hadoop1/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-1320 Update stats atomically

[jtaylor] PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor

[jtaylor] PHOENIX-1315 Optimize query for Pig loader

[jtaylor] PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods

[jtaylor] PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl)



Apache-Phoenix | 4.0 | Hadoop1 | Build Successful

2014-10-05, Apache Jenkins Server
4.0 branch build status: Successful

Source repository https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.0-hadoop1/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-1320 Update stats atomically

[jtaylor] PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor

[jtaylor] PHOENIX-1315 Optimize query for Pig loader

[jtaylor] PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods

[jtaylor] PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl)



[1/5] git commit: PHOENIX-1320 Update stats atomically

2014-10-05, jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master b1be0f8b8 -> 166671c89


PHOENIX-1320 Update stats atomically


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8621b7c9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8621b7c9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8621b7c9

Branch: refs/heads/master
Commit: 8621b7c9b7d2ca89643fdabf0948e30781326a27
Parents: b1be0f8
Author: James Taylor 
Authored: Sat Oct 4 16:54:36 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:42:45 2014 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 58 ---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |  3 +-
 .../query/ConnectionQueryServicesImpl.java  | 44 ---
 .../apache/phoenix/query/QueryConstants.java|  5 +-
 .../schema/stat/StatisticsCollector.java| 66 +
 .../phoenix/schema/stat/StatisticsScanner.java  | 34 ++---
 .../phoenix/schema/stat/StatisticsTable.java| 78 
 .../phoenix/schema/stat/StatisticsUtils.java|  8 --
 .../org/apache/phoenix/util/SchemaUtil.java |  5 ++
 9 files changed, 166 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8621b7c9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 0bf2710..4ddb322 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -34,8 +34,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -83,7 +81,6 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.stat.StatisticsCollector;
-import org.apache.phoenix.schema.stat.StatisticsTable;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
@@ -116,8 +113,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 public static final String EMPTY_CF = "EmptyCF";
 private static final Logger logger = 
LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
 private KeyValueBuilder kvBuilder;
-private static final Log LOG = 
LogFactory.getLog(UngroupedAggregateRegionObserver.class);
-private StatisticsTable statsTable = null;
 
 @Override
 public void start(CoprocessorEnvironment e) throws IOException {
@@ -125,8 +120,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 // Can't use ClientKeyValueBuilder on server-side because the memstore 
expects to
 // be able to get a single backing buffer for a KeyValue.
 this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
-String name = 
((RegionCoprocessorEnvironment)e).getRegion().getTableDesc().getTableName().getNameAsString();
-this.statsTable = 
StatisticsTable.getStatisticsTableForCoprocessor(e.getConfiguration(), name);
 }
 
 private static void commitBatch(HRegion region, List mutations, 
byte[] indexUUID) throws IOException {
@@ -161,12 +154,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 @Override
 protected RegionScanner doPostScannerOpen(final 
ObserverContext c, final Scan scan, final 
RegionScanner s) throws IOException {
 int offset = 0;
-boolean isAnalyze = false;
 HRegion region = c.getEnvironment().getRegion();
 StatisticsCollector stats = null;
-if(ScanUtil.isAnalyzeTable(scan) && statsTable != null) {
-stats = new StatisticsCollector(statsTable, 
c.getEnvironment().getConfiguration());
-isAnalyze = true;
+if(ScanUtil.isAnalyzeTable(scan)) {
+// Let this throw, as this scan is being done for the sole purpose 
of collecting stats
+stats = new StatisticsCollector(c.getEnvironment(), 
region.getRegionInfo().getTable().getNameAsString());
 }
 if (ScanUtil.isLocalIndex(scan)) {
 /*
@@ -260,7 +252,7 @@ public class UngroupedAggregateRegionO
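
[Editor's note] The diff above drops the cached StatisticsTable field and instead builds a StatisticsCollector per request from the coprocessor environment, and only for ANALYZE scans. A minimal sketch of that pattern, assuming the constructor and utility calls exactly as they appear in the diff; the wrapper class and method name below are hypothetical:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.phoenix.schema.stat.StatisticsCollector;
import org.apache.phoenix.util.ScanUtil;

class StatsCollectorSketch {
    // Returns a collector only for ANALYZE scans; any failure simply propagates,
    // matching the "Let this throw" comment in the diff.
    static StatisticsCollector forAnalyzeScan(
            ObserverContext<RegionCoprocessorEnvironment> c, Scan scan) throws IOException {
        if (!ScanUtil.isAnalyzeTable(scan)) {
            return null;
        }
        HRegion region = c.getEnvironment().getRegion();
        return new StatisticsCollector(c.getEnvironment(),
                region.getRegionInfo().getTable().getNameAsString());
    }
}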

[5/5] git commit: PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl)

2014-10-05, jamestaylor
PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars 
Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/166671c8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/166671c8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/166671c8

Branch: refs/heads/master
Commit: 166671c89b296c8c6a8369114fd3e0af797b8887
Parents: 846a8b5
Author: James Taylor 
Authored: Sun Oct 5 13:26:52 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:47:51 2014 -0700

--
 .../phoenix/end2end/CoalesceFunctionIT.java | 67 
 .../apache/phoenix/end2end/ReverseScanIT.java   |  2 +-
 ...ipRangeParallelIteratorRegionSplitterIT.java |  3 +-
 .../end2end/TenantSpecificTablesDDLIT.java  |  2 +-
 .../phoenix/end2end/ToCharFunctionIT.java   |  4 +-
 .../phoenix/end2end/ToNumberFunctionIT.java |  4 +-
 .../end2end/UpsertSelectAutoCommitIT.java   | 23 +++
 .../salted/SaltedTableVarLengthRowKeyIT.java|  8 +--
 .../apache/phoenix/compile/FromCompiler.java| 32 +++---
 .../apache/phoenix/compile/UpsertCompiler.java  | 19 ++
 .../apache/phoenix/execute/BaseQueryPlan.java   |  6 --
 11 files changed, 104 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/166671c8/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
index 57599e6..45fcb48 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
@@ -67,7 +67,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSumExplicitLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -91,7 +91,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSumImplicitLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -115,7 +115,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSecondParamAsExpression() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -139,7 +139,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void nonTypedSecondParameterLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT " //first parameter to coalesce
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -163,47 +163,32 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void nonTypedSecondParameterUnsignedDataTypes() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String[] dataTypes = {
-"UNSIGNED_INT",
-"UNSIGNED_LONG",
-"UNSIGNED_TINYINT",
-"UNSIGNED_SMALLINT",
-"UNSIGNED_FLOAT",
-"UNSIGNED_DOUBLE",
-"UNSIGNED_TIME",
-"UNSIGNED_DATE",
-"UNSIGNED_TIMESTAMP"
-};
-
-for (String dataType : dataTypes) {
-
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
-+ "ID BIGINT NOT NULL, "
-+ "COUNT " + dataType //first parameter to coalesce
-+ "CONSTRAINT pk PRIMARY KEY(ID))";
-conn.createStatement().execute(ddl);
-
-conn.createStatement().execute("UPSERT INTO TEST_COALESCE(ID, 
COUNT) VALUES(2, null)");
-   
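
[Editor's note] For context, the scenario this change guards against (and which the new UpsertSelectAutoCommitIT exercises) is an auto-committed UPSERT SELECT reading the very rows it is writing. An illustrative JDBC sketch, not the project's test code; the connection URL is a placeholder:

import java.sql.Connection;
import java.sql.DriverManager;

public class UpsertSelectSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point it at your own ZooKeeper quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(true);
            conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS T (ID BIGINT NOT NULL PRIMARY KEY, VAL BIGINT)");
            conn.createStatement().execute("UPSERT INTO T VALUES (1, 1)");
            // The SELECT side must evaluate against the table as it was before this
            // statement started, rather than also scanning the rows being upserted.
            conn.createStatement().execute("UPSERT INTO T(ID, VAL) SELECT ID + 1, VAL FROM T");
        }
    }
}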

[4/5] git commit: PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods

2014-10-05, jamestaylor
PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in 
ScanRanges intersect methods


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/846a8b52
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/846a8b52
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/846a8b52

Branch: refs/heads/master
Commit: 846a8b52eaf12245352cb64da72e1d8c17f4a8c9
Parents: 56a0c85
Author: James Taylor 
Authored: Sun Oct 5 10:48:11 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:47:26 2014 -0700

--
 .../apache/phoenix/cache/ServerCacheClient.java |  3 ++-
 .../org/apache/phoenix/compile/ScanRanges.java  | 28 +---
 .../phoenix/iterate/ParallelIterators.java  |  4 +--
 .../compile/ScanRangesIntersectTest.java|  2 +-
 .../apache/phoenix/compile/ScanRangesTest.java  |  2 +-
 5 files changed, 24 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/846a8b52/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index f22f874..ba7d265 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -172,7 +172,8 @@ public class ServerCacheClient {
 if ( ! servers.contains(entry) && 
 keyRanges.intersects(regionStartKey, regionEndKey,
 cacheUsingTable.getIndexType() == 
IndexType.LOCAL ? 
-ScanUtil.getRowKeyOffset(regionStartKey, 
regionEndKey) : 0)) {  // Call RPC once per server
+ScanUtil.getRowKeyOffset(regionStartKey, 
regionEndKey) : 0, true)) {  
+// Call RPC once per server
 servers.add(entry);
 if (LOG.isDebugEnabled()) 
{LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, 
connection));}
 final byte[] key = entry.getRegionInfo().getStartKey();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/846a8b52/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index 4591bdb..923bcf3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -207,7 +207,7 @@ public class ScanRanges {
 return temp;
 }
 
-public Scan intersectScan(Scan scan, final byte[] originalStartKey, final 
byte[] originalStopKey, final int keyOffset) {
+public Scan intersectScan(Scan scan, final byte[] originalStartKey, final 
byte[] originalStopKey, final int keyOffset, boolean crossesRegionBoundary) {
 byte[] startKey = originalStartKey;
 byte[] stopKey = originalStopKey;
 if (stopKey.length > 0 && Bytes.compareTo(startKey, stopKey) >= 0) { 
@@ -218,16 +218,22 @@ public class ScanRanges {
 // salt bytes in that case.
 final int scanKeyOffset = this.isSalted && !this.isPointLookup ? 
SaltingUtil.NUM_SALTING_BYTES : 0;
 assert (scanKeyOffset == 0 || keyOffset == 0);
-// Offset for startKey/stopKey. Either 1 for salted tables or the 
prefix length
-// of the current region for local indexes.
+// Total offset for startKey/stopKey. Either 1 for salted tables or 
the prefix length
+// of the current region for local indexes. We'll never have a case 
where a table is
+// both salted and local.
 final int totalKeyOffset = scanKeyOffset + keyOffset;
-// In this case, we've crossed the "prefix" boundary and should 
consider everything after the startKey
-// This prevents us from having to prefix the key prior to knowing 
whether or not there may be an
-// intersection.
 byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY;
 if (totalKeyOffset > 0) {
 prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset);
-if (ScanUtil.crossesPrefixBoundary(stopKey, prefixBytes, 
totalKeyOffset)) {
+/*
+ * If our startKey to stopKey crosses a region boundary consider 
everything after the startKey as our scan
+ * is always done within a single region. This prevents us from 
having to prefix the key prior to knowing

[3/5] git commit: PHOENIX-1315 Optimize query for Pig loader

2014-10-05, jamestaylor
PHOENIX-1315 Optimize query for Pig loader

Conflicts:

phoenix-pig/src/main/java/org/apache/phoenix/pig/hadoop/PhoenixRecordReader.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/56a0c852
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/56a0c852
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/56a0c852

Branch: refs/heads/master
Commit: 56a0c8529c3dd0b447dca3e00125a5d480ffd4ef
Parents: f551f85
Author: James Taylor 
Authored: Sun Oct 5 09:53:14 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:46:53 2014 -0700

--
 .../phoenix/end2end/EvaluationOfORIT.java   |  9 +--
 .../apache/phoenix/end2end/ReverseScanIT.java   |  4 +-
 ...ipRangeParallelIteratorRegionSplitterIT.java |  5 ++
 .../index/balancer/IndexLoadBalancerIT.java | 13 +++--
 .../org/apache/phoenix/compile/QueryPlan.java   |  3 +
 .../apache/phoenix/execute/AggregatePlan.java   |  6 ++
 .../phoenix/execute/DegenerateQueryPlan.java| 12 +++-
 .../apache/phoenix/execute/HashJoinPlan.java|  5 ++
 .../org/apache/phoenix/execute/ScanPlan.java|  8 +++
 .../phoenix/iterate/ConcatResultIterator.java   | 29 ++
 .../iterate/LookAheadResultIterator.java| 21 +++
 .../phoenix/iterate/ParallelIterators.java  | 39 +++--
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  6 ++
 .../phoenix/pig/PhoenixHBaseLoaderIT.java   | 24 
 .../phoenix/pig/hadoop/PhoenixInputFormat.java  | 13 +++--
 .../phoenix/pig/hadoop/PhoenixInputSplit.java   | 60 +++-
 .../phoenix/pig/hadoop/PhoenixRecordReader.java | 25 
 17 files changed, 196 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/56a0c852/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
index 052ff43..0e59542 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
@@ -28,21 +28,22 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Properties;
 
+import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category(BaseHBaseManagedTimeIT.class)
+@Category(HBaseManagedTimeTest.class)
 public class EvaluationOfORIT extends BaseHBaseManagedTimeIT{

@Test
public void testPKOrNotPKInOREvaluation() throws SQLException {
-   Properties props = new Properties(TEST_PROPERTIES);
+   Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props); 

conn.setAutoCommit(false);

-String create = "CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY 
KEY,NAME VARCHAR(50) NOT NULL)";
+String create = "CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY 
KEY,NAME VARCHAR(50))";
 PreparedStatement createStmt = conn.prepareStatement(create);
-createStmt.executeUpdate();
+createStmt.execute();
 PreparedStatement stmt = conn.prepareStatement(
 "upsert into " +
 "DIE VALUES (?, ?)");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/56a0c852/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index f7409a9..f738773 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -47,8 +47,8 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(HBaseManagedTimeTest.class)
-public class ReverseScanIT extends BaseClientManagedTimeIT {
+@Category(ClientManagedTimeTest.class)
+public class ReverseScanIT extends BaseHBaseManagedTimeIT {
 @BeforeClass
 @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
 public static void doSetup() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/56a0c852/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangePar
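
[Editor's note] A side note on the test change visible in EvaluationOfORIT above: new Properties(defaults) keeps the argument only as a fallback lookup table, so the entries are never copied, which is why the test switches to PropertiesUtil.deepCopy. A plain-Java sketch of the difference, assuming deepCopy behaves as its name suggests:

import java.util.Properties;

class PropsCopySketch {
    // new Properties(defaults) consults `defaults` only through getProperty();
    // keySet()/entrySet() on the new object stay empty, and later changes to
    // `defaults` show through. Copying each entry avoids both surprises.
    static Properties deepCopy(Properties source) {
        Properties copy = new Properties();
        for (String name : source.stringPropertyNames()) {
            copy.setProperty(name, source.getProperty(name));
        }
        return copy;
    }
}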

[2/5] git commit: PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor

2014-10-05, jamestaylor
PHOENIX-1323 Use utility method to get HTableInterface for scans from 
coprocessor


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f551f85a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f551f85a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f551f85a

Branch: refs/heads/master
Commit: f551f85ae07d6317a50aab3aacd4b445ce482c00
Parents: 8621b7c
Author: James Taylor 
Authored: Sat Oct 4 18:12:18 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:43:05 2014 -0700

--
 .../coprocessor/MetaDataEndpointImpl.java   | 33 +++-
 .../org/apache/phoenix/util/ServerUtil.java | 23 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f551f85a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index fdd9b14..372b0b1 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -73,9 +73,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -692,13 +690,10 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 multiTenant, viewType, viewIndexId, indexType, stats);
 }
 
-private PTableStats updateStatsInternal(byte[] tableNameBytes)
-throws IOException {
-HTable statsHTable = null;
+private PTableStats updateStatsInternal(byte[] tableNameBytes) throws 
IOException {
 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+HTableInterface statsHTable = 
ServerUtil.getHTableForCoprocessorScan(env, 
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
 try {
-// Can we do a new HTable instance here? Or get it from a pool or 
cache of these instances?
-statsHTable = new HTable(this.env.getConfiguration(), 
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES);
 Scan s = newTableRowsScan(tableNameBytes);
 s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, 
PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
 ResultScanner scanner = statsHTable.getScanner(s);
@@ -745,9 +740,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements Coprocesso
 throw new IOException(e);
 }
 } finally {
-if (statsHTable != null) {
-statsHTable.close();
-}
+statsHTable.close();
 }
 return PTableStatsImpl.NO_STATS;
 }
@@ -971,13 +964,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 // TableName systemCatalogTableName = 
region.getTableDesc().getTableName();
 // HTableInterface hTable = env.getTable(systemCatalogTableName);
 // These deprecated calls work around the issue
-HTablePool pool = new HTablePool (env.getConfiguration(),1);
+HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
 try {
-HTableInterface hTable = 
pool.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
-ResultScanner scanner = hTable.getScanner(scan);
 boolean allViewsInCurrentRegion = true;
 int numOfChildViews = 0;
 List results = Lists.newArrayList();
+ResultScanner scanner = hTable.getScanner(scan);
 try {
 for (Result result = scanner.next(); (result != null); result 
= scanner.next()) {
 numOfChildViews++;
@@ -990,17 +982,16 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 results.add(result);
 }
+TableViewFinderResult tableViewFinderResult = new 
TableViewFinderResult(results);
+if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
+tableViewFinderResult.setAllViewsNotInSingleRegion();
+}
+return t
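
[Editor's note] The pattern this change introduces, sketched below: obtain the HTableInterface from the new ServerUtil helper instead of constructing an HTable or HTablePool directly, and close it in a finally block. A sketch only; the exact parameter type expected by getHTableForCoprocessorScan and the wrapper class below are assumptions:

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.util.ServerUtil;

class CoprocessorScanSketch {
    static int countStatsRows(RegionCoprocessorEnvironment env, Scan scan) throws IOException {
        HTableInterface table = ServerUtil.getHTableForCoprocessorScan(env,
                PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
        try {
            ResultScanner scanner = table.getScanner(scan);
            int rows = 0;
            for (Result r = scanner.next(); r != null; r = scanner.next()) {
                rows++;
            }
            scanner.close();
            return rows;
        } finally {
            table.close();
        }
    }
}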

[2/5] git commit: PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor

2014-10-05, jamestaylor
PHOENIX-1323 Use utility method to get HTableInterface for scans from 
coprocessor


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8840af66
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8840af66
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8840af66

Branch: refs/heads/4.0
Commit: 8840af6607935054982fc6a6b0b8aad2081a716c
Parents: 8c054b2
Author: James Taylor 
Authored: Sat Oct 4 18:12:18 2014 -0700
Committer: James Taylor 
Committed: Sat Oct 4 18:12:18 2014 -0700

--
 .../coprocessor/MetaDataEndpointImpl.java   | 33 +++-
 .../org/apache/phoenix/util/ServerUtil.java | 23 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8840af66/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index aa77882..6a4f69b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -73,9 +73,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -692,13 +690,10 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 multiTenant, viewType, viewIndexId, indexType, stats);
 }
 
-private PTableStats updateStatsInternal(byte[] tableNameBytes)
-throws IOException {
-HTable statsHTable = null;
+private PTableStats updateStatsInternal(byte[] tableNameBytes) throws 
IOException {
 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+HTableInterface statsHTable = 
ServerUtil.getHTableForCoprocessorScan(env, 
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
 try {
-// Can we do a new HTable instance here? Or get it from a pool or 
cache of these instances?
-statsHTable = new HTable(this.env.getConfiguration(), 
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES);
 Scan s = newTableRowsScan(tableNameBytes);
 s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, 
PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
 ResultScanner scanner = statsHTable.getScanner(s);
@@ -745,9 +740,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements Coprocesso
 throw new IOException(e);
 }
 } finally {
-if (statsHTable != null) {
-statsHTable.close();
-}
+statsHTable.close();
 }
 return PTableStatsImpl.NO_STATS;
 }
@@ -970,13 +963,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 // TableName systemCatalogTableName = 
region.getTableDesc().getTableName();
 // HTableInterface hTable = env.getTable(systemCatalogTableName);
 // These deprecated calls work around the issue
-HTablePool pool = new HTablePool (env.getConfiguration(),1);
+HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
 try {
-HTableInterface hTable = 
pool.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
-ResultScanner scanner = hTable.getScanner(scan);
 boolean allViewsInCurrentRegion = true;
 int numOfChildViews = 0;
 List results = Lists.newArrayList();
+ResultScanner scanner = hTable.getScanner(scan);
 try {
 for (Result result = scanner.next(); (result != null); result 
= scanner.next()) {
 numOfChildViews++;
@@ -989,17 +981,16 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 results.add(result);
 }
+TableViewFinderResult tableViewFinderResult = new 
TableViewFinderResult(results);
+if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
+tableViewFinderResult.setAllViewsNotInSingleRegion();
+}
+return tabl

[4/5] git commit: PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods

2014-10-05, jamestaylor
PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in 
ScanRanges intersect methods


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c9101f82
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c9101f82
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c9101f82

Branch: refs/heads/4.0
Commit: c9101f827bf0ec0c76eedd6f91c746d9f4864506
Parents: 5df8d1e
Author: James Taylor 
Authored: Sun Oct 5 10:48:11 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 10:48:11 2014 -0700

--
 .../apache/phoenix/cache/ServerCacheClient.java |  3 ++-
 .../org/apache/phoenix/compile/ScanRanges.java  | 28 +---
 .../phoenix/iterate/ParallelIterators.java  |  4 +--
 .../compile/ScanRangesIntersectTest.java|  2 +-
 .../apache/phoenix/compile/ScanRangesTest.java  |  2 +-
 5 files changed, 24 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9101f82/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index f22f874..ba7d265 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -172,7 +172,8 @@ public class ServerCacheClient {
 if ( ! servers.contains(entry) && 
 keyRanges.intersects(regionStartKey, regionEndKey,
 cacheUsingTable.getIndexType() == 
IndexType.LOCAL ? 
-ScanUtil.getRowKeyOffset(regionStartKey, 
regionEndKey) : 0)) {  // Call RPC once per server
+ScanUtil.getRowKeyOffset(regionStartKey, 
regionEndKey) : 0, true)) {  
+// Call RPC once per server
 servers.add(entry);
 if (LOG.isDebugEnabled()) 
{LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, 
connection));}
 final byte[] key = entry.getRegionInfo().getStartKey();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9101f82/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index 4591bdb..923bcf3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -207,7 +207,7 @@ public class ScanRanges {
 return temp;
 }
 
-public Scan intersectScan(Scan scan, final byte[] originalStartKey, final 
byte[] originalStopKey, final int keyOffset) {
+public Scan intersectScan(Scan scan, final byte[] originalStartKey, final 
byte[] originalStopKey, final int keyOffset, boolean crossesRegionBoundary) {
 byte[] startKey = originalStartKey;
 byte[] stopKey = originalStopKey;
 if (stopKey.length > 0 && Bytes.compareTo(startKey, stopKey) >= 0) { 
@@ -218,16 +218,22 @@ public class ScanRanges {
 // salt bytes in that case.
 final int scanKeyOffset = this.isSalted && !this.isPointLookup ? 
SaltingUtil.NUM_SALTING_BYTES : 0;
 assert (scanKeyOffset == 0 || keyOffset == 0);
-// Offset for startKey/stopKey. Either 1 for salted tables or the 
prefix length
-// of the current region for local indexes.
+// Total offset for startKey/stopKey. Either 1 for salted tables or 
the prefix length
+// of the current region for local indexes. We'll never have a case 
where a table is
+// both salted and local.
 final int totalKeyOffset = scanKeyOffset + keyOffset;
-// In this case, we've crossed the "prefix" boundary and should 
consider everything after the startKey
-// This prevents us from having to prefix the key prior to knowing 
whether or not there may be an
-// intersection.
 byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY;
 if (totalKeyOffset > 0) {
 prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset);
-if (ScanUtil.crossesPrefixBoundary(stopKey, prefixBytes, 
totalKeyOffset)) {
+/*
+ * If our startKey to stopKey crosses a region boundary consider 
everything after the startKey as our scan
+ * is always done within a single region. This prevents us from 
having to prefix the key prior to knowing
+ 

[1/5] git commit: PHOENIX-1320 Update stats atomically

2014-10-05, jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.0 e0996ade7 -> e49e8dcfb


PHOENIX-1320 Update stats atomically


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8c054b2a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8c054b2a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8c054b2a

Branch: refs/heads/4.0
Commit: 8c054b2ae9f59c9430d75b0bf1b1c269714db395
Parents: e0996ad
Author: James Taylor 
Authored: Sat Oct 4 16:54:36 2014 -0700
Committer: James Taylor 
Committed: Sat Oct 4 16:54:36 2014 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 58 ---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |  3 +-
 .../query/ConnectionQueryServicesImpl.java  | 44 ---
 .../apache/phoenix/query/QueryConstants.java|  5 +-
 .../schema/stat/StatisticsCollector.java| 66 +
 .../phoenix/schema/stat/StatisticsScanner.java  | 34 ++---
 .../phoenix/schema/stat/StatisticsTable.java| 78 
 .../phoenix/schema/stat/StatisticsUtils.java|  8 --
 .../org/apache/phoenix/util/SchemaUtil.java |  5 ++
 9 files changed, 166 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c054b2a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 0bf2710..4ddb322 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -34,8 +34,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
@@ -83,7 +81,6 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.stat.StatisticsCollector;
-import org.apache.phoenix.schema.stat.StatisticsTable;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
@@ -116,8 +113,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 public static final String EMPTY_CF = "EmptyCF";
 private static final Logger logger = 
LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
 private KeyValueBuilder kvBuilder;
-private static final Log LOG = 
LogFactory.getLog(UngroupedAggregateRegionObserver.class);
-private StatisticsTable statsTable = null;
 
 @Override
 public void start(CoprocessorEnvironment e) throws IOException {
@@ -125,8 +120,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 // Can't use ClientKeyValueBuilder on server-side because the memstore 
expects to
 // be able to get a single backing buffer for a KeyValue.
 this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
-String name = 
((RegionCoprocessorEnvironment)e).getRegion().getTableDesc().getTableName().getNameAsString();
-this.statsTable = 
StatisticsTable.getStatisticsTableForCoprocessor(e.getConfiguration(), name);
 }
 
 private static void commitBatch(HRegion region, List mutations, 
byte[] indexUUID) throws IOException {
@@ -161,12 +154,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver{
 @Override
 protected RegionScanner doPostScannerOpen(final 
ObserverContext c, final Scan scan, final 
RegionScanner s) throws IOException {
 int offset = 0;
-boolean isAnalyze = false;
 HRegion region = c.getEnvironment().getRegion();
 StatisticsCollector stats = null;
-if(ScanUtil.isAnalyzeTable(scan) && statsTable != null) {
-stats = new StatisticsCollector(statsTable, 
c.getEnvironment().getConfiguration());
-isAnalyze = true;
+if(ScanUtil.isAnalyzeTable(scan)) {
+// Let this throw, as this scan is being done for the sole purpose 
of collecting stats
+stats = new StatisticsCollector(c.getEnvironment(), 
region.getRegionInfo().getTable().getNameAsString());
 }
 if (ScanUtil.isLocalIndex(scan)) {
 /*
@@ -260,7 +252,7 @@ public class UngroupedAggregateRegionObserve

[3/5] git commit: PHOENIX-1315 Optimize query for Pig loader

2014-10-05, jamestaylor
PHOENIX-1315 Optimize query for Pig loader


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5df8d1ec
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5df8d1ec
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5df8d1ec

Branch: refs/heads/4.0
Commit: 5df8d1ec4a2d755365738f9e0e6e8310bf96d83e
Parents: 8840af6
Author: James Taylor 
Authored: Sun Oct 5 09:53:14 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 09:53:14 2014 -0700

--
 .../phoenix/end2end/EvaluationOfORIT.java   |  9 +--
 .../apache/phoenix/end2end/ReverseScanIT.java   |  4 +-
 ...ipRangeParallelIteratorRegionSplitterIT.java |  5 ++
 .../index/balancer/IndexLoadBalancerIT.java | 13 +++--
 .../org/apache/phoenix/compile/QueryPlan.java   |  3 +
 .../apache/phoenix/execute/AggregatePlan.java   |  6 ++
 .../phoenix/execute/DegenerateQueryPlan.java| 12 +++-
 .../apache/phoenix/execute/HashJoinPlan.java|  5 ++
 .../org/apache/phoenix/execute/ScanPlan.java|  8 +++
 .../phoenix/iterate/ConcatResultIterator.java   | 29 ++
 .../iterate/LookAheadResultIterator.java| 21 +++
 .../phoenix/iterate/ParallelIterators.java  | 39 +++--
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  6 ++
 .../phoenix/pig/PhoenixHBaseLoaderIT.java   | 24 
 .../phoenix/pig/hadoop/PhoenixInputFormat.java  | 13 +++--
 .../phoenix/pig/hadoop/PhoenixInputSplit.java   | 60 +++-
 .../phoenix/pig/hadoop/PhoenixRecordReader.java | 25 
 17 files changed, 196 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5df8d1ec/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
index 052ff43..0e59542 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
@@ -28,21 +28,22 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Properties;
 
+import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category(BaseHBaseManagedTimeIT.class)
+@Category(HBaseManagedTimeTest.class)
 public class EvaluationOfORIT extends BaseHBaseManagedTimeIT{

@Test
public void testPKOrNotPKInOREvaluation() throws SQLException {
-   Properties props = new Properties(TEST_PROPERTIES);
+   Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props); 

conn.setAutoCommit(false);

-String create = "CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY 
KEY,NAME VARCHAR(50) NOT NULL)";
+String create = "CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY 
KEY,NAME VARCHAR(50))";
 PreparedStatement createStmt = conn.prepareStatement(create);
-createStmt.executeUpdate();
+createStmt.execute();
 PreparedStatement stmt = conn.prepareStatement(
 "upsert into " +
 "DIE VALUES (?, ?)");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5df8d1ec/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index f7409a9..f738773 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -47,8 +47,8 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(HBaseManagedTimeTest.class)
-public class ReverseScanIT extends BaseClientManagedTimeIT {
+@Category(ClientManagedTimeTest.class)
+public class ReverseScanIT extends BaseHBaseManagedTimeIT {
 @BeforeClass
 @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
 public static void doSetup() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5df8d1ec/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParal

[5/5] git commit: PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl)

2014-10-05, jamestaylor
PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars 
Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e49e8dcf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e49e8dcf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e49e8dcf

Branch: refs/heads/4.0
Commit: e49e8dcfbed740e13515c0b9aaf79db602059fd4
Parents: c9101f8
Author: James Taylor 
Authored: Sun Oct 5 13:26:52 2014 -0700
Committer: James Taylor 
Committed: Sun Oct 5 18:11:37 2014 -0700

--
 .../phoenix/end2end/CoalesceFunctionIT.java | 67 
 .../apache/phoenix/end2end/ReverseScanIT.java   |  2 +-
 ...ipRangeParallelIteratorRegionSplitterIT.java |  3 +-
 .../end2end/TenantSpecificTablesDDLIT.java  |  2 +-
 .../phoenix/end2end/ToCharFunctionIT.java   |  4 +-
 .../phoenix/end2end/ToNumberFunctionIT.java |  4 +-
 .../end2end/UpsertSelectAutoCommitIT.java   | 23 +++
 .../salted/SaltedTableVarLengthRowKeyIT.java|  8 +--
 .../apache/phoenix/compile/FromCompiler.java| 32 +++---
 .../apache/phoenix/compile/UpsertCompiler.java  | 19 ++
 .../apache/phoenix/execute/BaseQueryPlan.java   |  6 --
 11 files changed, 104 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e49e8dcf/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
index 57599e6..45fcb48 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
@@ -67,7 +67,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSumExplicitLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -91,7 +91,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSumImplicitLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -115,7 +115,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void coalesceWithSecondParamAsExpression() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT "
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -139,7 +139,7 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void nonTypedSecondParameterLong() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
+String ddl = "CREATE TABLE TEST_COALESCE("
 + "ID BIGINT NOT NULL, "
 + "COUNT BIGINT " //first parameter to coalesce
 + "CONSTRAINT pk PRIMARY KEY(ID))";
@@ -163,47 +163,32 @@ public class CoalesceFunctionIT extends 
BaseHBaseManagedTimeIT {
 public void nonTypedSecondParameterUnsignedDataTypes() throws Exception {
 Connection conn = DriverManager.getConnection(getUrl());
 
-String[] dataTypes = {
-"UNSIGNED_INT",
-"UNSIGNED_LONG",
-"UNSIGNED_TINYINT",
-"UNSIGNED_SMALLINT",
-"UNSIGNED_FLOAT",
-"UNSIGNED_DOUBLE",
-"UNSIGNED_TIME",
-"UNSIGNED_DATE",
-"UNSIGNED_TIMESTAMP"
-};
-
-for (String dataType : dataTypes) {
-
-String ddl = "CREATE TABLE IF NOT EXISTS TEST_COALESCE("
-+ "ID BIGINT NOT NULL, "
-+ "COUNT " + dataType //first parameter to coalesce
-+ "CONSTRAINT pk PRIMARY KEY(ID))";
-conn.createStatement().execute(ddl);
-
-conn.createStatement().execute("UPSERT INTO TEST_COALESCE(ID, 
COUNT) VALUES(2, null)");
-co