Jenkins build is back to normal : Phoenix | 4.x-HBase-0.98 #1065

2016-02-15 Thread Apache Jenkins Server
See 



phoenix git commit: PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue construction from map phase to reduce phase (Sergey Soldatov)

2016-02-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 1434ad7cd -> 75e4f4132


PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue 
construction from map phase to reduce phase (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/75e4f413
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/75e4f413
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/75e4f413

Branch: refs/heads/4.x-HBase-1.0
Commit: 75e4f413242c0e3639c847d834cae8899f6df151
Parents: 1434ad7
Author: Rajeshbabu Chintaguntla 
Authored: Tue Feb 16 12:14:13 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Feb 16 12:14:13 2016 +0530

--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java |   6 +-
 .../mapreduce/FormatToKeyValueMapper.java   | 164 ---
 .../mapreduce/FormatToKeyValueReducer.java  | 127 --
 .../bulkload/TargetTableRefFunctions.java   |  22 ++-
 4 files changed, 281 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/75e4f413/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 39ee4b1..ab2848f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -268,7 +269,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 
 job.setInputFormatClass(TextInputFormat.class);
 job.setMapOutputKeyClass(TableRowkeyPair.class);
-job.setMapOutputValueClass(KeyValue.class);
+job.setMapOutputValueClass(ImmutableBytesWritable.class);
 job.setOutputKeyClass(TableRowkeyPair.class);
 job.setOutputValueClass(KeyValue.class);
 job.setReducerClass(FormatToKeyValueReducer.class);
@@ -276,7 +277,10 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded);
 
 final String tableNamesAsJson = 
TargetTableRefFunctions.NAMES_TO_JSON.apply(tablesToBeLoaded);
+final String logicalNamesAsJson = 
TargetTableRefFunctions.LOGICAN_NAMES_TO_JSON.apply(tablesToBeLoaded);
+
 
job.getConfiguration().set(FormatToKeyValueMapper.TABLE_NAMES_CONFKEY,tableNamesAsJson);
+
job.getConfiguration().set(FormatToKeyValueMapper.LOGICAL_NAMES_CONFKEY,logicalNamesAsJson);
 
 // give subclasses their hook
 setupJob(job);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/75e4f413/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
index 7e115e5..95b099e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
@@ -17,30 +17,30 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
 import 
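
For readers skimming the diff above: the essence of PHOENIX-1973 is that the map output value type changes from KeyValue to ImmutableBytesWritable, with KeyValues only built in FormatToKeyValueReducer. Below is a minimal, self-contained sketch of the resulting job wiring, using the class names shown in the diff; the surrounding driver class is illustrative only, not the actual AbstractBulkLoadTool.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.phoenix.mapreduce.FormatToKeyValueReducer;
import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;

// Illustrative sketch of the job configuration after PHOENIX-1973.
public class BulkLoadJobWiringSketch {
    public static Job configure(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "phoenix-csv-bulk-load-sketch");
        job.setInputFormatClass(TextInputFormat.class);
        // Mappers now emit a compact serialized form of each row's values...
        job.setMapOutputKeyClass(TableRowkeyPair.class);
        job.setMapOutputValueClass(ImmutableBytesWritable.class);
        // ...and KeyValue construction happens once, in the reducer.
        job.setReducerClass(FormatToKeyValueReducer.class);
        job.setOutputKeyClass(TableRowkeyPair.class);
        job.setOutputValueClass(KeyValue.class);
        return job;
    }
}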

phoenix git commit: PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue construction from map phase to reduce phase (Sergey Soldatov)

2016-02-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 4f0fe167f -> 0b80eef66


PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue 
construction from map phase to reduce phase (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0b80eef6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0b80eef6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0b80eef6

Branch: refs/heads/4.x-HBase-0.98
Commit: 0b80eef660a57d4ed045ce6e070e7133a3610690
Parents: 4f0fe16
Author: Rajeshbabu Chintaguntla 
Authored: Tue Feb 16 12:13:31 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Feb 16 12:13:31 2016 +0530

--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java |   6 +-
 .../mapreduce/FormatToKeyValueMapper.java   | 164 ---
 .../mapreduce/FormatToKeyValueReducer.java  | 127 --
 .../bulkload/TargetTableRefFunctions.java   |  22 ++-
 4 files changed, 281 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0b80eef6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 39ee4b1..ab2848f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -268,7 +269,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 
 job.setInputFormatClass(TextInputFormat.class);
 job.setMapOutputKeyClass(TableRowkeyPair.class);
-job.setMapOutputValueClass(KeyValue.class);
+job.setMapOutputValueClass(ImmutableBytesWritable.class);
 job.setOutputKeyClass(TableRowkeyPair.class);
 job.setOutputValueClass(KeyValue.class);
 job.setReducerClass(FormatToKeyValueReducer.class);
@@ -276,7 +277,10 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded);
 
 final String tableNamesAsJson = 
TargetTableRefFunctions.NAMES_TO_JSON.apply(tablesToBeLoaded);
+final String logicalNamesAsJson = 
TargetTableRefFunctions.LOGICAN_NAMES_TO_JSON.apply(tablesToBeLoaded);
+
 
job.getConfiguration().set(FormatToKeyValueMapper.TABLE_NAMES_CONFKEY,tableNamesAsJson);
+
job.getConfiguration().set(FormatToKeyValueMapper.LOGICAL_NAMES_CONFKEY,logicalNamesAsJson);
 
 // give subclasses their hook
 setupJob(job);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0b80eef6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
index 7e115e5..95b099e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
@@ -17,30 +17,30 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
 import 

phoenix git commit: PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue construction from map phase to reduce phase (Sergey Soldatov)

2016-02-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 60ef7cd54 -> e797b36c2


PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue 
construction from map phase to reduce phase (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e797b36c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e797b36c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e797b36c

Branch: refs/heads/master
Commit: e797b36c2ce42e9b9fd6b37fd8b9f79f79d6f18f
Parents: 60ef7cd
Author: Rajeshbabu Chintaguntla 
Authored: Tue Feb 16 12:12:23 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Feb 16 12:12:23 2016 +0530

--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java |   6 +-
 .../mapreduce/FormatToKeyValueMapper.java   | 164 ---
 .../mapreduce/FormatToKeyValueReducer.java  | 127 --
 .../bulkload/TargetTableRefFunctions.java   |  22 ++-
 4 files changed, 281 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e797b36c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 39ee4b1..ab2848f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -268,7 +269,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 
 job.setInputFormatClass(TextInputFormat.class);
 job.setMapOutputKeyClass(TableRowkeyPair.class);
-job.setMapOutputValueClass(KeyValue.class);
+job.setMapOutputValueClass(ImmutableBytesWritable.class);
 job.setOutputKeyClass(TableRowkeyPair.class);
 job.setOutputValueClass(KeyValue.class);
 job.setReducerClass(FormatToKeyValueReducer.class);
@@ -276,7 +277,10 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded);
 
 final String tableNamesAsJson = 
TargetTableRefFunctions.NAMES_TO_JSON.apply(tablesToBeLoaded);
+final String logicalNamesAsJson = 
TargetTableRefFunctions.LOGICAN_NAMES_TO_JSON.apply(tablesToBeLoaded);
+
 
job.getConfiguration().set(FormatToKeyValueMapper.TABLE_NAMES_CONFKEY,tableNamesAsJson);
+
job.getConfiguration().set(FormatToKeyValueMapper.LOGICAL_NAMES_CONFKEY,logicalNamesAsJson);
 
 // give subclasses their hook
 setupJob(job);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e797b36c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
index 7e115e5..95b099e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
@@ -17,30 +17,30 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
 import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions;
 

Apache-Phoenix | 4.x-HBase-1.0 | Build Successful

2016-02-15 Thread Apache Jenkins Server
4.x-HBase-1.0 branch build status: Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-1.0

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2221 Option to make data regions not writable when index regions

[jtaylor] PHOENIX-2635 Partial index rebuild doesn't work for mutable data

[jtaylor] PHOENIX-2602 Parser does not handle escaped LPAREN

[jtaylor] PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should



Build times for the last couple of runs (latest build is right-most) | Legend: blue = normal, red = test failure, gray = timeout


Apache-Phoenix | Master | Build Successful

2016-02-15 Thread Apache Jenkins Server
Master branch build status: Successful
Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/master

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2221 Option to make data regions not writable when index regions

[jtaylor] PHOENIX-2635 Partial index rebuild doesn't work for mutable data

[jtaylor] PHOENIX-2602 Parser does not handle escaped LPAREN

[jtaylor] PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should



Build times for the last couple of runs (latest build is right-most) | Legend: blue = normal, red = test failure, gray = timeout


[3/5] phoenix git commit: PHOENIX-2635 Partial index rebuild doesn't work for mutable data

2016-02-15 Thread jamestaylor
PHOENIX-2635 Partial index rebuild doesn't work for mutable data


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/974d3b70
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/974d3b70
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/974d3b70

Branch: refs/heads/4.x-HBase-1.0
Commit: 974d3b70f99b84e502cbdab35c53335b4a0d3804
Parents: a07c811
Author: James Taylor 
Authored: Mon Feb 15 00:33:05 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:25:32 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java| 378 +++
 .../end2end/index/ReadOnlyIndexFailureIT.java   |  75 ++--
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   5 +-
 .../coprocessor/MetaDataRegionObserver.java | 120 +-
 .../hbase/index/covered/LocalTableState.java|  19 +-
 .../phoenix/hbase/index/covered/TableState.java |   7 +-
 .../index/covered/data/LocalHBaseState.java |   6 +-
 .../hbase/index/covered/data/LocalTable.java|   9 +-
 .../example/CoveredColumnIndexCodec.java|   4 +-
 .../hbase/index/scanner/ScannerBuilder.java |   1 -
 .../apache/phoenix/index/IndexMaintainer.java   |   4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  11 +-
 .../phoenix/index/PhoenixIndexMetaData.java |  10 +-
 .../index/PhoenixTransactionalIndexer.java  |   2 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |  32 +-
 .../apache/phoenix/parse/NamedTableNode.java|   8 +
 .../phoenix/query/QueryServicesOptions.java |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  34 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   8 +-
 .../index/covered/TestLocalTableState.java  |  10 +-
 .../example/TestCoveredColumnIndexCodec.java|   4 +-
 22 files changed, 368 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/974d3b70/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 567fc9a..ebc6988 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -30,24 +30,17 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseCluster;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.QueryServices;
@@ -61,7 +54,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -75,28 +67,29 @@ import com.google.common.collect.Maps;
  * For some reason dropping tables after running this test
  * fails unless it runs its own mini cluster. 
  * 
- * 
- * @since 2.1
  */
 
 @Category(NeedsOwnMiniClusterTest.class)
 @RunWith(Parameterized.class)
 public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
-private Timer scheduleTimer;
-
+public static volatile boolean FAIL_WRITE = false;
+public static final String 

[1/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 6a3d6090f -> 1434ad7cd


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a07c811f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index f5c9295..abd31c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,6 +66,8 @@ import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
+import co.cask.tephra.TxConstants;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
@@ -78,8 +80,6 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
-import co.cask.tephra.TxConstants;
-
 /**
  *
  * Base class for PTable implementors.  Provides abstraction for
@@ -101,6 +101,7 @@ public class PTableImpl implements PTable {
 private PIndexState state;
 private long sequenceNumber;
 private long timeStamp;
+private long indexDisableTimestamp;
 // Have MultiMap for String->PColumn (may need family qualifier)
 private List pkColumns;
 private List allColumns;
@@ -207,7 +208,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), getColumnsToClone(table), parentSchemaName, 
table.getParentTableName(),
 indexes, table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), viewStatement,
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, List columns) 
throws SQLException {
@@ -216,7 +217,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), columns, table.getParentSchemaName(), 
table.getParentTableName(),
 table.getIndexes(), table.isImmutableRows(), 
table.getPhysicalNames(), table.getDefaultFamilyName(), 
table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns) throws SQLException {
@@ -225,7 +226,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(), 
table.getIndexes(),
 table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
 table.isMultiTenant(), table.getStoreNulls(), 
table.getViewType(), table.getViewIndexId(), table.getIndexType(), 
table.getTableStats(),
-table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency());
+table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency(), 
table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns, boolean isImmutableRows) throws 
SQLException {
@@ -234,7 +235,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(),
 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), 

[4/5] phoenix git commit: PHOENIX-2602 Parser does not handle escaped LPAREN

2016-02-15 Thread jamestaylor
PHOENIX-2602 Parser does not handle escaped LPAREN


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cb5cae17
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cb5cae17
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cb5cae17

Branch: refs/heads/4.x-HBase-1.0
Commit: cb5cae17efc7230d9d87aa18c17d0d7597787e27
Parents: 974d3b7
Author: James Taylor 
Authored: Mon Feb 15 01:44:31 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:25:53 2016 -0800

--
 phoenix-core/src/main/antlr3/PhoenixSQL.g | 7 ---
 .../test/java/org/apache/phoenix/parse/QueryParserTest.java   | 6 ++
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cb5cae17/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 0be5717..64e1d32 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1213,14 +1213,14 @@ DIGIT
 STRING_LITERAL
 @init{ StringBuilder sb = new StringBuilder(); }
 :   '\''
-( t=CHAR_ESC { sb.append(getText()); }
-| t=CHAR { sb.append(t.getText()); }
+( t=CHAR { sb.append(t.getText()); } 
+| t=CHAR_ESC { sb.append(getText()); }
 )* '\'' { setText(sb.toString()); }
 ;
 
 fragment
 CHAR
-:   ( ~('\'') )
+:   ( ~('\'' | '\\') )
 ;
 
 fragment
@@ -1242,6 +1242,7 @@ CHAR_ESC
 | '\\'  { setText("\\"); }
 | '_'   { setText("\\_"); }
 | '%'   { setText("\\\%"); }
+|   { setText("\\"); }
 )
 |   '\'\''  { setText("\'"); }
 ;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cb5cae17/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 5363042..70f590f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -766,4 +766,10 @@ public class QueryParserTest {
 String sql = "select * from t where 'a' <= ALL(a-b+1)";
 parseQuery(sql);
 }
+
+@Test
+public void testDoubleBackslash() throws Exception {
+String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'";
+parseQuery(sql);
+}
 }
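
The new testDoubleBackslash case above is the quickest way to see what changed: a backslash before '(' inside a string literal now parses. Below is a small JDBC sketch of the same statement, offered as an illustration only; the table T, column A, and the connection URL are placeholders, not part of the commit.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class EscapedParenQuerySketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point it at a real Phoenix cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // The Java literal "a\\(d" reaches the parser as the SQL string 'a\(d';
            // before PHOENIX-2602 the escaped LPAREN was rejected.
            ResultSet rs = stmt.executeQuery("SELECT * FROM T WHERE A LIKE 'a\\(d'");
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}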



[5/5] phoenix git commit: PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with .equals() (Julian Eberius)

2016-02-15 Thread jamestaylor
PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with 
.equals() (Julian Eberius)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1434ad7c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1434ad7c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1434ad7c

Branch: refs/heads/4.x-HBase-1.0
Commit: 1434ad7cda647f1946e4f3cc7a89ced6a1c00835
Parents: cb5cae1
Author: James Taylor 
Authored: Mon Feb 15 09:50:43 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:26:11 2016 -0800

--
 .../phoenix/end2end/CompareDecimalToLongIT.java | 241 --
 .../apache/phoenix/end2end/PrimitiveTypeIT.java | 245 +++
 .../phoenix/expression/LiteralExpression.java   |   2 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  14 +-
 4 files changed, 252 insertions(+), 250 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1434ad7c/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
deleted file mode 100644
index 3a358c4..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Properties;
-
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.Test;
-
-
-public class CompareDecimalToLongIT extends BaseClientManagedTimeIT {
-protected static void initTableValues(byte[][] splits, long ts) throws 
Exception {
-ensureTableCreated(getUrl(),"LongInKeyTest",splits, ts-2);
-
-// Insert all rows at ts
-String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-Connection conn = DriverManager.getConnection(url);
-conn.setAutoCommit(true);
-PreparedStatement stmt = conn.prepareStatement(
-"upsert into " +
-"LongInKeyTest VALUES(?)");
-stmt.setLong(1, 2);
-stmt.execute();
-conn.close();
-}
-
-@Test
-public void testCompareLongGTDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l > 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-assertTrue (rs.next());
-assertEquals(2, rs.getLong(1));
-assertFalse(rs.next());
-} finally {
-conn.close();
-}
-}
-
-@Test
-public void testCompareLongGTEDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l >= 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-/*
- *  Failing because 
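
The reasoning behind this fix is a general Java pitfall rather than anything Phoenix-specific: == on boxed Boolean values compares object identity, which only holds when both sides are the canonical Boolean.TRUE/Boolean.FALSE instances. A minimal illustration, independent of LiteralExpression itself:

public class BoxedBooleanComparisonDemo {
    public static void main(String[] args) {
        Boolean canonical = Boolean.TRUE;        // cached canonical instance
        Boolean distinct = new Boolean(true);    // different object, same value

        // Identity comparison: true only when both references are the same object.
        System.out.println(canonical == Boolean.TRUE);      // true
        System.out.println(distinct == Boolean.TRUE);       // false, despite equal values

        // Value comparison, which is what the commit switches to.
        System.out.println(Boolean.TRUE.equals(distinct));  // true
    }
}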

[2/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
PHOENIX-2221 Option to make data regions not writable when index regions are 
not available (Alicia Ying Shu, James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a07c811f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a07c811f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a07c811f

Branch: refs/heads/4.x-HBase-1.0
Commit: a07c811f4643844c56a2e0840deddba9e7bc87d2
Parents: 6a3d609
Author: James Taylor 
Authored: Sun Feb 14 09:06:14 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:23:54 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java|  31 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   | 289 +++
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  87 --
 .../coprocessor/MetaDataRegionObserver.java |  29 +-
 .../coprocessor/generated/PTableProtos.java | 103 ++-
 .../phoenix/exception/SQLExceptionCode.java |   2 +
 .../apache/phoenix/execute/MutationState.java   |  27 +-
 .../index/write/DelegateIndexFailurePolicy.java |  58 
 .../index/PhoenixIndexFailurePolicy.java|  48 ++-
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java |   1 +
 .../apache/phoenix/schema/DelegateTable.java|   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |  38 +--
 .../java/org/apache/phoenix/schema/PTable.java  |   1 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  51 ++--
 .../phoenix/execute/CorrelatePlanTest.java  |   2 +-
 phoenix-protocol/src/main/PTable.proto  |   1 +
 21 files changed, 655 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a07c811f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 0861dd4..567fc9a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -172,7 +172,7 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 TableName indexTable =
 TableName.valueOf(localIndex ? MetaDataUtil
 .getLocalIndexTableName(fullTableName) : 
fullIndexName);
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 HTableDescriptor indexTableDesc = 
admin.getTableDescriptor(indexTable);
 try{
 admin.disableTable(indexTable);
@@ -184,20 +184,10 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 stmt.setString(2, "x2");
 stmt.setString(3, "2");
 stmt.execute();
-if (transactional) {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-conn.rollback();
-}
-}
-else {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-}
+try {
+conn.commit();
+fail();
+} catch (SQLException e) {
 }
 
 // Verify the metadata for index is correct.
@@ -340,9 +330,9 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 // find a RS which doesn't has CATALOG table
 TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
 TableName indexTable = TableName.valueOf(fullIndexName);
-final HBaseCluster cluster = this.getUtility().getHBaseCluster();
+final HBaseCluster cluster = getUtility().getHBaseCluster();
 Collection rss = 
cluster.getClusterStatus().getServers();
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 List regions = admin.getTableRegions(catalogTable);
 ServerName catalogRS = 
cluster.getServerHoldingRegion(regions.get(0).getTable(),
 regions.get(0).getRegionName());
@@ -362,7 +352,7 @@ 

[2/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
PHOENIX-2221 Option to make data regions not writable when index regions are 
not available (Alicia Ying Shu, James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b32bfc96
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b32bfc96
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b32bfc96

Branch: refs/heads/4.x-HBase-0.98
Commit: b32bfc96ca5f1fbe6ebf0062f1bd813ce5313b64
Parents: 07dd496
Author: James Taylor 
Authored: Sun Feb 14 09:06:14 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:17:05 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java|  31 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   | 289 +++
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  87 --
 .../coprocessor/MetaDataRegionObserver.java |  29 +-
 .../coprocessor/generated/PTableProtos.java | 103 ++-
 .../phoenix/exception/SQLExceptionCode.java |   2 +
 .../apache/phoenix/execute/MutationState.java   |  27 +-
 .../index/write/DelegateIndexFailurePolicy.java |  58 
 .../index/PhoenixIndexFailurePolicy.java|  48 ++-
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java |   1 +
 .../apache/phoenix/schema/DelegateTable.java|   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |  38 +--
 .../java/org/apache/phoenix/schema/PTable.java  |   1 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  51 ++--
 .../phoenix/execute/CorrelatePlanTest.java  |   2 +-
 phoenix-protocol/src/main/PTable.proto  |   1 +
 21 files changed, 655 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b32bfc96/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 76fff83..d5ef673 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -172,7 +172,7 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 TableName indexTable =
 TableName.valueOf(localIndex ? MetaDataUtil
 .getLocalIndexTableName(fullTableName) : 
fullIndexName);
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 HTableDescriptor indexTableDesc = 
admin.getTableDescriptor(indexTable);
 try{
 admin.disableTable(indexTable);
@@ -184,20 +184,10 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 stmt.setString(2, "x2");
 stmt.setString(3, "2");
 stmt.execute();
-if (transactional) {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-conn.rollback();
-}
-}
-else {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-}
+try {
+conn.commit();
+fail();
+} catch (SQLException e) {
 }
 
 // Verify the metadata for index is correct.
@@ -340,9 +330,9 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 // find a RS which doesn't has CATALOG table
 TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
 TableName indexTable = TableName.valueOf(fullIndexName);
-final HBaseCluster cluster = this.getUtility().getHBaseCluster();
+final HBaseCluster cluster = getUtility().getHBaseCluster();
 Collection rss = 
cluster.getClusterStatus().getServers();
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 List regions = admin.getTableRegions(catalogTable);
 ServerName catalogRS = 
cluster.getServerHoldingRegion(regions.get(0).getRegionName());
 ServerName metaRS = cluster.getServerHoldingMeta();
@@ 

[5/5] phoenix git commit: PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with .equals() (Julian Eberius)

2016-02-15 Thread jamestaylor
PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with 
.equals() (Julian Eberius)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f0fe167
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f0fe167
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f0fe167

Branch: refs/heads/4.x-HBase-0.98
Commit: 4f0fe167f4cfe5b192e6c05c77ccae2db8fdfc23
Parents: 99e08f7
Author: James Taylor 
Authored: Mon Feb 15 09:50:43 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:21:13 2016 -0800

--
 .../phoenix/end2end/CompareDecimalToLongIT.java | 241 --
 .../apache/phoenix/end2end/PrimitiveTypeIT.java | 245 +++
 .../phoenix/expression/LiteralExpression.java   |   2 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  14 +-
 4 files changed, 252 insertions(+), 250 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f0fe167/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
deleted file mode 100644
index 3a358c4..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Properties;
-
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.Test;
-
-
-public class CompareDecimalToLongIT extends BaseClientManagedTimeIT {
-protected static void initTableValues(byte[][] splits, long ts) throws 
Exception {
-ensureTableCreated(getUrl(),"LongInKeyTest",splits, ts-2);
-
-// Insert all rows at ts
-String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-Connection conn = DriverManager.getConnection(url);
-conn.setAutoCommit(true);
-PreparedStatement stmt = conn.prepareStatement(
-"upsert into " +
-"LongInKeyTest VALUES(?)");
-stmt.setLong(1, 2);
-stmt.execute();
-conn.close();
-}
-
-@Test
-public void testCompareLongGTDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l > 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-assertTrue (rs.next());
-assertEquals(2, rs.getLong(1));
-assertFalse(rs.next());
-} finally {
-conn.close();
-}
-}
-
-@Test
-public void testCompareLongGTEDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l >= 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-/*
- *  Failing because 

[1/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 07dd49674 -> 4f0fe167f


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b32bfc96/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index f5c9295..abd31c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,6 +66,8 @@ import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
+import co.cask.tephra.TxConstants;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
@@ -78,8 +80,6 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
-import co.cask.tephra.TxConstants;
-
 /**
  *
  * Base class for PTable implementors.  Provides abstraction for
@@ -101,6 +101,7 @@ public class PTableImpl implements PTable {
 private PIndexState state;
 private long sequenceNumber;
 private long timeStamp;
+private long indexDisableTimestamp;
 // Have MultiMap for String->PColumn (may need family qualifier)
 private List pkColumns;
 private List allColumns;
@@ -207,7 +208,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), getColumnsToClone(table), parentSchemaName, 
table.getParentTableName(),
 indexes, table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), viewStatement,
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, List columns) 
throws SQLException {
@@ -216,7 +217,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), columns, table.getParentSchemaName(), 
table.getParentTableName(),
 table.getIndexes(), table.isImmutableRows(), 
table.getPhysicalNames(), table.getDefaultFamilyName(), 
table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns) throws SQLException {
@@ -225,7 +226,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(), 
table.getIndexes(),
 table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
 table.isMultiTenant(), table.getStoreNulls(), 
table.getViewType(), table.getViewIndexId(), table.getIndexType(), 
table.getTableStats(),
-table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency());
+table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency(), 
table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns, boolean isImmutableRows) throws 
SQLException {
@@ -234,7 +235,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(),
 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), 

[4/5] phoenix git commit: PHOENIX-2602 Parser does not handle escaped LPAREN

2016-02-15 Thread jamestaylor
PHOENIX-2602 Parser does not handle escaped LPAREN


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/99e08f7d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/99e08f7d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/99e08f7d

Branch: refs/heads/4.x-HBase-0.98
Commit: 99e08f7d07519c227839006a2247daadcd2ca9aa
Parents: 7f9bca7
Author: James Taylor 
Authored: Mon Feb 15 01:44:31 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:20:58 2016 -0800

--
 phoenix-core/src/main/antlr3/PhoenixSQL.g | 7 ---
 .../test/java/org/apache/phoenix/parse/QueryParserTest.java   | 6 ++
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/99e08f7d/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 1aa7f3d..834a410 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1205,14 +1205,14 @@ DIGIT
 STRING_LITERAL
 @init{ StringBuilder sb = new StringBuilder(); }
 :   '\''
-( t=CHAR_ESC { sb.append(getText()); }
-| t=CHAR { sb.append(t.getText()); }
+( t=CHAR { sb.append(t.getText()); } 
+| t=CHAR_ESC { sb.append(getText()); }
 )* '\'' { setText(sb.toString()); }
 ;
 
 fragment
 CHAR
-:   ( ~('\'') )
+:   ( ~('\'' | '\\') )
 ;
 
 fragment
@@ -1234,6 +1234,7 @@ CHAR_ESC
 | '\\'  { setText("\\"); }
 | '_'   { setText("\\_"); }
 | '%'   { setText("\\\%"); }
+|   { setText("\\"); }
 )
 |   '\'\''  { setText("\'"); }
 ;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/99e08f7d/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 5363042..70f590f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -766,4 +766,10 @@ public class QueryParserTest {
 String sql = "select * from t where 'a' <= ALL(a-b+1)";
 parseQuery(sql);
 }
+
+@Test
+public void testDoubleBackslash() throws Exception {
+String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'";
+parseQuery(sql);
+}
 }



[3/5] phoenix git commit: PHOENIX-2635 Partial index rebuild doesn't work for mutable data

2016-02-15 Thread jamestaylor
PHOENIX-2635 Partial index rebuild doesn't work for mutable data


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7f9bca75
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7f9bca75
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7f9bca75

Branch: refs/heads/4.x-HBase-0.98
Commit: 7f9bca75a91d7c5c8f58aaed2f5706524ff1866c
Parents: b32bfc9
Author: James Taylor 
Authored: Mon Feb 15 00:33:05 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 11:20:36 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java| 376 +++
 .../end2end/index/ReadOnlyIndexFailureIT.java   |  75 ++--
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   5 +-
 .../coprocessor/MetaDataRegionObserver.java | 120 +-
 .../hbase/index/covered/LocalTableState.java|  19 +-
 .../phoenix/hbase/index/covered/TableState.java |   7 +-
 .../index/covered/data/LocalHBaseState.java |   6 +-
 .../hbase/index/covered/data/LocalTable.java|   9 +-
 .../example/CoveredColumnIndexCodec.java|   4 +-
 .../hbase/index/scanner/ScannerBuilder.java |   1 -
 .../apache/phoenix/index/IndexMaintainer.java   |   4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  11 +-
 .../phoenix/index/PhoenixIndexMetaData.java |  10 +-
 .../index/PhoenixTransactionalIndexer.java  |   2 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |  32 +-
 .../apache/phoenix/parse/NamedTableNode.java|   8 +
 .../phoenix/query/QueryServicesOptions.java |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  34 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   8 +-
 .../index/covered/TestLocalTableState.java  |  10 +-
 .../example/TestCoveredColumnIndexCodec.java|   4 +-
 22 files changed, 368 insertions(+), 381 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f9bca75/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index d5ef673..ebc6988 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -30,24 +30,17 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseCluster;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.QueryServices;
@@ -61,7 +54,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -75,28 +67,29 @@ import com.google.common.collect.Maps;
  * For some reason dropping tables after running this test
  * fails unless it runs its own mini cluster. 
  * 
- * 
- * @since 2.1
  */
 
 @Category(NeedsOwnMiniClusterTest.class)
 @RunWith(Parameterized.class)
 public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
-private Timer scheduleTimer;
-
+public static volatile boolean FAIL_WRITE = false;
+public static final String 
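
(The diff above is truncated, but the import and field changes show the direction of the refactor: the Timer/TimerTask machinery is replaced by a coprocessor hook plus a static FAIL_WRITE flag, so the test can make index writes fail on demand instead of waiting on background timing.) A minimal sketch of that pattern, assuming the HBase 0.98/1.x coprocessor API visible in the new imports; the class and member names follow the diff, but the body is illustrative rather than the exact test code:

    import java.io.IOException;

    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
    import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;

    // Rejects batched writes while a test-controlled flag is set, so an
    // index-write failure can be triggered deterministically.
    public class FailingRegionObserver extends SimpleRegionObserver {
        public static volatile boolean FAIL_WRITE = false;

        @Override
        public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
                MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
            if (FAIL_WRITE) {
                // DoNotRetryIOException surfaces the failure to the client
                // immediately instead of letting HBase retry the batch.
                throw new DoNotRetryIOException("simulated index write failure");
            }
        }
    }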

[4/5] phoenix git commit: PHOENIX-2602 Parser does not handle escaped LPAREN

2016-02-15 Thread jamestaylor
PHOENIX-2602 Parser does not handle escaped LPAREN


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/43b34da1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/43b34da1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/43b34da1

Branch: refs/heads/master
Commit: 43b34da1d4e10bef233bbb748c5dd1be11d7ce18
Parents: 046bda3
Author: James Taylor 
Authored: Mon Feb 15 01:44:31 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:58 2016 -0800

--
 phoenix-core/src/main/antlr3/PhoenixSQL.g | 7 ---
 .../test/java/org/apache/phoenix/parse/QueryParserTest.java   | 6 ++
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/43b34da1/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 0be5717..64e1d32 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1213,14 +1213,14 @@ DIGIT
 STRING_LITERAL
 @init{ StringBuilder sb = new StringBuilder(); }
 :   '\''
-( t=CHAR_ESC { sb.append(getText()); }
-| t=CHAR { sb.append(t.getText()); }
+( t=CHAR { sb.append(t.getText()); } 
+| t=CHAR_ESC { sb.append(getText()); }
 )* '\'' { setText(sb.toString()); }
 ;
 
 fragment
 CHAR
-:   ( ~('\'') )
+:   ( ~('\'' | '\\') )
 ;
 
 fragment
@@ -1242,6 +1242,7 @@ CHAR_ESC
 | '\\'  { setText("\\"); }
 | '_'   { setText("\\_"); }
 | '%'   { setText("\\\%"); }
+|   { setText("\\"); }
 )
 |   '\'\''  { setText("\'"); }
 ;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/43b34da1/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 5363042..70f590f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -766,4 +766,10 @@ public class QueryParserTest {
 String sql = "select * from t where 'a' <= ALL(a-b+1)";
 parseQuery(sql);
 }
+
+@Test
+public void testDoubleBackslash() throws Exception {
+String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'";
+parseQuery(sql);
+}
 }
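
As a usage-level illustration of this commit: with the extra empty alternative in CHAR_ESC, a backslash that does not introduce a recognized escape is kept as a literal backslash, so a LIKE pattern containing an escaped '(' now gets through the lexer. A minimal JDBC sketch, assuming a table T with a VARCHAR column A as in the new test above and a local Phoenix quorum:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class EscapedLParenExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            // The Java literal below produces the SQL string
            // SELECT * FROM T WHERE A LIKE 'a\(d', the same query as testDoubleBackslash.
            PreparedStatement stmt =
                    conn.prepareStatement("SELECT * FROM T WHERE A LIKE 'a\\(d'");
            stmt.executeQuery();
            conn.close();
        }
    }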



[2/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
PHOENIX-2221 Option to make data regions not writable when index regions are 
not available (Alicia Ying Shu, James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e2a6386f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e2a6386f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e2a6386f

Branch: refs/heads/master
Commit: e2a6386f3b9343aec74c5f96f0e0124e80b9f8b1
Parents: 6881aef
Author: James Taylor 
Authored: Sun Feb 14 09:06:14 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 00:33:18 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java|  31 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   | 289 +++
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  92 +++---
 .../coprocessor/MetaDataRegionObserver.java |  27 +-
 .../coprocessor/generated/PTableProtos.java | 103 ++-
 .../phoenix/exception/SQLExceptionCode.java |   2 +
 .../apache/phoenix/execute/MutationState.java   |  39 ++-
 .../index/write/DelegateIndexFailurePolicy.java |  58 
 .../index/PhoenixIndexFailurePolicy.java|  48 ++-
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java |   1 +
 .../apache/phoenix/schema/DelegateTable.java|   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |  38 +--
 .../java/org/apache/phoenix/schema/PTable.java  |   1 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  51 ++--
 .../phoenix/execute/CorrelatePlanTest.java  |   2 +-
 phoenix-protocol/src/main/PTable.proto  |   1 +
 21 files changed, 660 insertions(+), 141 deletions(-)
--
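
The new entries in QueryServices/QueryServicesOptions above carry the configuration knob for this behavior. A minimal sketch of setting it programmatically; the key name phoenix.index.failure.block.write is an assumption inferred from the option's description (the authoritative constant lives in org.apache.phoenix.query.QueryServices), and in practice the option would normally be placed in hbase-site.xml on the region servers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BlockWritesOnIndexFailureConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key: block data-table writes while an index is unavailable.
            conf.setBoolean("phoenix.index.failure.block.write", true);
            System.out.println(conf.get("phoenix.index.failure.block.write"));
        }
    }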


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2a6386f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 5f39515..176c5a0 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -172,7 +172,7 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 TableName indexTable =
 TableName.valueOf(localIndex ? MetaDataUtil
 .getLocalIndexTableName(fullTableName) : 
fullIndexName);
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 HTableDescriptor indexTableDesc = 
admin.getTableDescriptor(indexTable);
 try{
 admin.disableTable(indexTable);
@@ -184,20 +184,10 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 stmt.setString(2, "x2");
 stmt.setString(3, "2");
 stmt.execute();
-if (transactional) {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-conn.rollback();
-}
-}
-else {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-}
+try {
+conn.commit();
+fail();
+} catch (SQLException e) {
 }
 
 // Verify the metadata for index is correct.
@@ -341,9 +331,9 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 // find a RS which doesn't has CATALOG table
 TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
 TableName indexTable = TableName.valueOf(fullIndexName);
-final HBaseCluster cluster = this.getUtility().getHBaseCluster();
+final HBaseCluster cluster = getUtility().getHBaseCluster();
 Collection rss = 
cluster.getClusterStatus().getServers();
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 List regions = admin.getTableRegions(catalogTable);
 ServerName catalogRS = 
cluster.getServerHoldingRegion(regions.get(0).getTable(),
 regions.get(0).getRegionName());
@@ -363,7 +353,7 @@ public 

[1/5] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-15 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master 6881aef0c -> 60ef7cd54


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2a6386f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index f5c9295..abd31c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,6 +66,8 @@ import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
+import co.cask.tephra.TxConstants;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
@@ -78,8 +80,6 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
-import co.cask.tephra.TxConstants;
-
 /**
  *
  * Base class for PTable implementors.  Provides abstraction for
@@ -101,6 +101,7 @@ public class PTableImpl implements PTable {
 private PIndexState state;
 private long sequenceNumber;
 private long timeStamp;
+private long indexDisableTimestamp;
 // Have MultiMap for String->PColumn (may need family qualifier)
 private List pkColumns;
 private List allColumns;
@@ -207,7 +208,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), getColumnsToClone(table), parentSchemaName, 
table.getParentTableName(),
 indexes, table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), viewStatement,
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, List columns) 
throws SQLException {
@@ -216,7 +217,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), columns, table.getParentSchemaName(), 
table.getParentTableName(),
 table.getIndexes(), table.isImmutableRows(), 
table.getPhysicalNames(), table.getDefaultFamilyName(), 
table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns) throws SQLException {
@@ -225,7 +226,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(), 
table.getIndexes(),
 table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
 table.isMultiTenant(), table.getStoreNulls(), 
table.getViewType(), table.getViewIndexId(), table.getIndexType(), 
table.getTableStats(),
-table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency());
+table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency(), 
table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns, boolean isImmutableRows) throws 
SQLException {
@@ -234,7 +235,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(),
 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
-  

[5/5] phoenix git commit: PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with .equals() (Julian Eberius)

2016-02-15 Thread jamestaylor
PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with 
.equals() (Julian Eberius)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/60ef7cd5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/60ef7cd5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/60ef7cd5

Branch: refs/heads/master
Commit: 60ef7cd54e26fd1635e503c7d7981ba2cdf4c6fc
Parents: 43b34da
Author: James Taylor 
Authored: Mon Feb 15 09:50:43 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:58 2016 -0800

--
 .../phoenix/end2end/CompareDecimalToLongIT.java | 241 --
 .../apache/phoenix/end2end/PrimitiveTypeIT.java | 245 +++
 .../phoenix/expression/LiteralExpression.java   |   2 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  14 +-
 4 files changed, 252 insertions(+), 250 deletions(-)
--
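
The one-line change to LiteralExpression reflects a general Java pitfall rather than anything Phoenix-specific: Boolean.TRUE and Boolean.FALSE are interned, but a Boolean that arrives any other way (for example via new Boolean(...) or deserialization) is a distinct object, so an identity comparison can miss the cached literal even though the values are equal. A standalone illustration:

    public class BooleanCompareExample {
        public static void main(String[] args) {
            Boolean interned = Boolean.TRUE;
            Boolean distinct = new Boolean(true); // separate instance, same value

            System.out.println(interned == distinct);      // false: different objects
            System.out.println(interned.equals(distinct)); // true: equal values
        }
    }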


http://git-wip-us.apache.org/repos/asf/phoenix/blob/60ef7cd5/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
deleted file mode 100644
index 3a358c4..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Properties;
-
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.Test;
-
-
-public class CompareDecimalToLongIT extends BaseClientManagedTimeIT {
-protected static void initTableValues(byte[][] splits, long ts) throws 
Exception {
-ensureTableCreated(getUrl(),"LongInKeyTest",splits, ts-2);
-
-// Insert all rows at ts
-String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-Connection conn = DriverManager.getConnection(url);
-conn.setAutoCommit(true);
-PreparedStatement stmt = conn.prepareStatement(
-"upsert into " +
-"LongInKeyTest VALUES(?)");
-stmt.setLong(1, 2);
-stmt.execute();
-conn.close();
-}
-
-@Test
-public void testCompareLongGTDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l > 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-assertTrue (rs.next());
-assertEquals(2, rs.getLong(1));
-assertFalse(rs.next());
-} finally {
-conn.close();
-}
-}
-
-@Test
-public void testCompareLongGTEDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l >= 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-/*
- *  Failing because we're 

[3/5] phoenix git commit: PHOENIX-2635 Partial index rebuild doesn't work for mutable data

2016-02-15 Thread jamestaylor
PHOENIX-2635 Partial index rebuild doesn't work for mutable data


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/046bda34
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/046bda34
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/046bda34

Branch: refs/heads/master
Commit: 046bda34771aaec3befd4ad17024afc5af9b83ed
Parents: e2a6386
Author: James Taylor 
Authored: Mon Feb 15 00:33:05 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:54 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java| 379 +++
 .../end2end/index/ReadOnlyIndexFailureIT.java   |  75 ++--
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   5 +-
 .../coprocessor/MetaDataRegionObserver.java | 120 +-
 .../hbase/index/covered/LocalTableState.java|  19 +-
 .../phoenix/hbase/index/covered/TableState.java |   7 +-
 .../index/covered/data/LocalHBaseState.java |   6 +-
 .../hbase/index/covered/data/LocalTable.java|   9 +-
 .../example/CoveredColumnIndexCodec.java|   4 +-
 .../hbase/index/scanner/ScannerBuilder.java |   1 -
 .../apache/phoenix/index/IndexMaintainer.java   |   4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  12 +-
 .../phoenix/index/PhoenixIndexMetaData.java |  10 +-
 .../index/PhoenixTransactionalIndexer.java  |   2 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |  32 +-
 .../apache/phoenix/parse/NamedTableNode.java|   8 +
 .../phoenix/query/QueryServicesOptions.java |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  34 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   8 +-
 .../index/covered/TestLocalTableState.java  |  10 +-
 .../example/TestCoveredColumnIndexCodec.java|   4 +-
 22 files changed, 368 insertions(+), 385 deletions(-)
--
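
The MetaDataRegionObserver changes listed above are what drive the automatic partial rebuild once an index's disable timestamp has been recorded. Independently of that background task, a disabled index can always be rebuilt explicitly; a minimal JDBC sketch using Phoenix's ALTER INDEX DDL, where MY_INDEX and MY_TABLE are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ManualIndexRebuildExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try (Statement stmt = conn.createStatement()) {
                // Rebuilds the index from its data table.
                stmt.execute("ALTER INDEX MY_INDEX ON MY_TABLE REBUILD");
            }
            conn.close();
        }
    }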


http://git-wip-us.apache.org/repos/asf/phoenix/blob/046bda34/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 176c5a0..ebc6988 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -30,24 +30,17 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseCluster;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.QueryServices;
@@ -61,7 +54,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -75,28 +67,29 @@ import com.google.common.collect.Maps;
  * For some reason dropping tables after running this test
  * fails unless it runs its own mini cluster. 
  * 
- * 
- * @since 2.1
  */
 
 @Category(NeedsOwnMiniClusterTest.class)
 @RunWith(Parameterized.class)
 public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
-private Timer scheduleTimer;
-
+public static volatile boolean FAIL_WRITE = false;
+public static final String INDEX_NAME 

Jenkins build is back to normal : Phoenix | 4.x-HBase-0.98 #1063

2016-02-15 Thread Apache Jenkins Server
See 



Jenkins build is back to normal : Phoenix-4.x-HBase-1.0 #385

2016-02-15 Thread Apache Jenkins Server
See 



phoenix git commit: PHOENIX-2671 System.STATS table getting truncated every time on new client connection (addendum)

2016-02-15 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 19012c026 -> 6a3d6090f


PHOENIX-2671 System.STATS table getting truncated every time on new client 
connection (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6a3d6090
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6a3d6090
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6a3d6090

Branch: refs/heads/4.x-HBase-1.0
Commit: 6a3d6090f2aaa7ce272bbb25b403f6530df58605
Parents: 19012c0
Author: Ankit Singhal 
Authored: Mon Feb 15 22:52:17 2016 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 15 22:52:17 2016 +0530

--
 .../apache/phoenix/query/ConnectionQueryServicesImpl.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a3d6090/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 26d7a3d..a8c9324 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -200,6 +200,12 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
+import co.cask.tephra.TransactionSystemClient;
+import co.cask.tephra.TxConstants;
+import co.cask.tephra.distributed.PooledClientProvider;
+import co.cask.tephra.distributed.TransactionServiceClient;
+
+
 public class ConnectionQueryServicesImpl extends DelegateQueryServices 
implements ConnectionQueryServices {
 private static final Logger logger = 
LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
 private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
@@ -249,10 +255,6 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private ScheduledExecutorService renewLeaseExecutor;
 private final boolean renewLeaseEnabled;
 
-import co.cask.tephra.TransactionSystemClient;
-import co.cask.tephra.TxConstants;
-import co.cask.tephra.distributed.PooledClientProvider;
-import co.cask.tephra.distributed.TransactionServiceClient;
 
 
 private static interface FeatureSupported {
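
Read together, the two hunks show what this addendum actually fixes: a previous change had left the four co.cask.tephra import statements inside the class body, which is not valid Java, and the addendum moves them back into the import section at the top of the file. In outline (names copied from the diff; not compilable without the Phoenix and Tephra jars on the classpath):

    package org.apache.phoenix.query;

    // Imports must precede the type declaration; no import statement is
    // legal once the class body has started.
    import co.cask.tephra.TransactionSystemClient;
    import co.cask.tephra.TxConstants;
    import co.cask.tephra.distributed.PooledClientProvider;
    import co.cask.tephra.distributed.TransactionServiceClient;

    public class ConnectionQueryServicesImpl /* extends DelegateQueryServices ... */ {
        // class body
    }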



phoenix git commit: PHOENIX-2671 System.STATS table getting truncated every time on new client connection (addendum)

2016-02-15 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 a8b3a03ef -> 07dd49674


PHOENIX-2671 System.STATS table getting truncated every time on new client 
connection (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07dd4967
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07dd4967
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07dd4967

Branch: refs/heads/4.x-HBase-0.98
Commit: 07dd49674630353dfb0e6310187a341806191250
Parents: a8b3a03
Author: Ankit Singhal 
Authored: Mon Feb 15 22:48:53 2016 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 15 22:48:53 2016 +0530

--
 .../apache/phoenix/query/ConnectionQueryServicesImpl.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/07dd4967/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index a9af8ea..f7a72a0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -201,6 +201,12 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
+import co.cask.tephra.TransactionSystemClient;
+import co.cask.tephra.TxConstants;
+import co.cask.tephra.distributed.PooledClientProvider;
+import co.cask.tephra.distributed.TransactionServiceClient;
+
+
 public class ConnectionQueryServicesImpl extends DelegateQueryServices 
implements ConnectionQueryServices {
 private static final Logger logger = 
LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
 private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
@@ -668,10 +674,6 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 });
 }
 
-import co.cask.tephra.TransactionSystemClient;
-import co.cask.tephra.TxConstants;
-import co.cask.tephra.distributed.PooledClientProvider;
-import co.cask.tephra.distributed.TransactionServiceClient;
 
 
 @Override