phoenix git commit: PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/master 153b357d5 -> fc6cf43a4 PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fc6cf43a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fc6cf43a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fc6cf43a Branch: refs/heads/master Commit: fc6cf43a476c2048c2ee4431311487b30517a208 Parents: 153b357 Author: Rajeshbabu Chintaguntla Authored: Thu Apr 19 17:19:20 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Apr 19 17:19:20 2018 +0530 -- ...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +- .../end2end/ColumnProjectionOptimizationIT.java | 14 .../apache/phoenix/end2end/DynamicColumnIT.java | 12 +++ .../apache/phoenix/end2end/DynamicFamilyIT.java | 26 +++--- .../phoenix/end2end/MappingTableDataTypeIT.java | 4 +-- .../phoenix/end2end/NativeHBaseTypesIT.java | 30 .../end2end/QueryDatabaseMetaDataIT.java| 4 +-- .../org/apache/phoenix/end2end/UpgradeIT.java | 4 +-- .../phoenix/tx/ParameterizedTransactionIT.java | 4 +-- .../coprocessor/MetaDataEndpointImpl.java | 36 ++-- .../UngroupedAggregateRegionObserver.java | 2 +- .../apache/phoenix/index/IndexMaintainer.java | 12 +++ .../query/ConnectionQueryServicesImpl.java | 2 +- .../org/apache/phoenix/schema/PTableImpl.java | 4 +-- .../phoenix/schema/stats/StatisticsWriter.java | 9 +++-- .../java/org/apache/phoenix/util/IndexUtil.java | 6 ++-- .../wal/ReadWriteKeyValuesWithCodecTest.java| 6 ++-- .../index/covered/CoveredColumnIndexCodec.java | 2 +- .../index/covered/LocalTableStateTest.java | 10 +++--- .../covered/TestCoveredColumnIndexCodec.java| 6 ++-- .../hbase/index/write/TestIndexWriter.java | 10 ++ .../index/write/TestParalleIndexWriter.java | 2 +- .../write/TestParalleWriterIndexCommitter.java | 2 +- .../index/write/TestWALRecoveryCaching.java | 4 +-- 
.../recovery/TestPerRegionIndexWriteCache.java | 4 +-- .../java/org/apache/phoenix/util/TestUtil.java | 4 +-- 26 files changed, 106 insertions(+), 115 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index e2ddd4e..49933b2 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -210,7 +210,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT { //make an attempted write to the primary that should also be indexed byte[] rowkey = Bytes.toBytes("indexed_row_key"); Put p = new Put(rowkey); -p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); +p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); region.put(p); // we should then see the server go down http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc6cf43a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java index e4ff66f..43dc302 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java @@ -242,19 +242,19 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT { try { htable = conn2.getQueryServices().getTable(htableName); Put put 
= new Put(PInteger.INSTANCE.toBytes(1)); -put.add(cfB, c1, PInteger.INSTANCE.toBytes(1)); -put.add(cfC, c2, PLong.INSTANCE.toBytes(2)); +put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(1)); +put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(2)); htable.put(put); put = new Put(PInteger.INSTANCE.toBytes(2)); -put.add(cfC, c2, PLong.INSTANCE.toBytes(10)); -
phoenix git commit: PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 fdb650851 -> b907d373b PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b907d373 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b907d373 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b907d373 Branch: refs/heads/4.x-HBase-1.1 Commit: b907d373b6eafde198e4a865e052b9b5a4f873a0 Parents: fdb6508 Author: Rajeshbabu Chintaguntla Authored: Mon Apr 23 12:06:21 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Apr 23 12:06:21 2018 +0530 -- ...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +- .../end2end/ColumnProjectionOptimizationIT.java | 14 .../apache/phoenix/end2end/DynamicColumnIT.java | 12 +++ .../apache/phoenix/end2end/DynamicFamilyIT.java | 26 +++--- .../phoenix/end2end/MappingTableDataTypeIT.java | 4 +-- .../phoenix/end2end/NativeHBaseTypesIT.java | 30 .../end2end/QueryDatabaseMetaDataIT.java| 4 +-- .../org/apache/phoenix/end2end/UpgradeIT.java | 4 +-- .../phoenix/tx/ParameterizedTransactionIT.java | 4 +-- .../coprocessor/MetaDataEndpointImpl.java | 36 ++-- .../UngroupedAggregateRegionObserver.java | 2 +- .../apache/phoenix/index/IndexMaintainer.java | 12 +++ .../query/ConnectionQueryServicesImpl.java | 2 +- .../org/apache/phoenix/schema/PTableImpl.java | 4 +-- .../phoenix/schema/stats/StatisticsWriter.java | 9 +++-- .../java/org/apache/phoenix/util/IndexUtil.java | 6 ++-- .../wal/ReadWriteKeyValuesWithCodecTest.java| 6 ++-- .../index/covered/CoveredColumnIndexCodec.java | 2 +- .../index/covered/LocalTableStateTest.java | 10 +++--- .../covered/TestCoveredColumnIndexCodec.java| 6 ++-- .../hbase/index/write/TestIndexWriter.java | 7 ++-- .../index/write/TestParalleIndexWriter.java | 2 +- .../write/TestParalleWriterIndexCommitter.java | 2 +- .../index/write/TestWALRecoveryCaching.java | 4 +-- 
.../recovery/TestPerRegionIndexWriteCache.java | 4 +-- .../java/org/apache/phoenix/util/TestUtil.java | 4 +-- 26 files changed, 106 insertions(+), 112 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b907d373/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index dd885b0..4fc4128 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -207,7 +207,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT { //make an attempted write to the primary that should also be indexed byte[] rowkey = Bytes.toBytes("indexed_row_key"); Put p = new Put(rowkey); -p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); +p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); region.put(p); // we should then see the server go down http://git-wip-us.apache.org/repos/asf/phoenix/blob/b907d373/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java index e4ff66f..43dc302 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java @@ -242,19 +242,19 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT { try { htable = conn2.getQueryServices().getTable(htableName); Put put 
= new Put(PInteger.INSTANCE.toBytes(1)); -put.add(cfB, c1, PInteger.INSTANCE.toBytes(1)); -put.add(cfC, c2, PLong.INSTANCE.toBytes(2)); +put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(1)); +put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(2)); htable.put(put); put = new Put(PInteger.INSTANCE.toBytes(2)); -put.add(cfC, c2, PLon
phoenix git commit: PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 bc8ad88c3 -> afc6627bd PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/afc6627b Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/afc6627b Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/afc6627b Branch: refs/heads/4.x-HBase-1.3 Commit: afc6627bd6828612a92390eb8489f7e082b392ad Parents: bc8ad88 Author: Rajeshbabu Chintaguntla Authored: Mon Apr 23 12:16:15 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Apr 23 12:16:15 2018 +0530 -- ...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +- .../end2end/ColumnProjectionOptimizationIT.java | 14 .../apache/phoenix/end2end/DynamicColumnIT.java | 12 +++ .../apache/phoenix/end2end/DynamicFamilyIT.java | 26 +++--- .../phoenix/end2end/MappingTableDataTypeIT.java | 4 +-- .../phoenix/end2end/NativeHBaseTypesIT.java | 30 .../end2end/QueryDatabaseMetaDataIT.java| 4 +-- .../org/apache/phoenix/end2end/UpgradeIT.java | 4 +-- .../phoenix/tx/ParameterizedTransactionIT.java | 4 +-- .../coprocessor/MetaDataEndpointImpl.java | 36 ++-- .../UngroupedAggregateRegionObserver.java | 2 +- .../apache/phoenix/index/IndexMaintainer.java | 12 +++ .../query/ConnectionQueryServicesImpl.java | 2 +- .../org/apache/phoenix/schema/PTableImpl.java | 4 +-- .../phoenix/schema/stats/StatisticsWriter.java | 9 +++-- .../java/org/apache/phoenix/util/IndexUtil.java | 6 ++-- .../wal/ReadWriteKeyValuesWithCodecTest.java| 6 ++-- .../index/covered/CoveredColumnIndexCodec.java | 2 +- .../index/covered/LocalTableStateTest.java | 10 +++--- .../covered/TestCoveredColumnIndexCodec.java| 6 ++-- .../hbase/index/write/TestIndexWriter.java | 7 ++-- .../index/write/TestParalleIndexWriter.java | 2 +- .../write/TestParalleWriterIndexCommitter.java | 2 +- .../index/write/TestWALRecoveryCaching.java | 4 +-- 
.../recovery/TestPerRegionIndexWriteCache.java | 4 +-- .../java/org/apache/phoenix/util/TestUtil.java | 4 +-- 26 files changed, 106 insertions(+), 112 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/afc6627b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index e2ddd4e..49933b2 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -210,7 +210,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT { //make an attempted write to the primary that should also be indexed byte[] rowkey = Bytes.toBytes("indexed_row_key"); Put p = new Put(rowkey); -p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); +p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value")); region.put(p); // we should then see the server go down http://git-wip-us.apache.org/repos/asf/phoenix/blob/afc6627b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java index e4ff66f..43dc302 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java @@ -242,19 +242,19 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT { try { htable = conn2.getQueryServices().getTable(htableName); Put put 
= new Put(PInteger.INSTANCE.toBytes(1)); -put.add(cfB, c1, PInteger.INSTANCE.toBytes(1)); -put.add(cfC, c2, PLong.INSTANCE.toBytes(2)); +put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(1)); +put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(2)); htable.put(put); put = new Put(PInteger.INSTANCE.toBytes(2)); -put.add(cfC, c2, PLon
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/master 78636a3c2 -> 8f1cef824 PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8f1cef82 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8f1cef82 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8f1cef82 Branch: refs/heads/master Commit: 8f1cef824b086c7c697688767e0460c18fa554d6 Parents: 78636a3 Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:24:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:24:43 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/8f1cef82/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index 1828818..66f99ad 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString()); 
+StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8f1cef82/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 afc6627bd -> e578a869c PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e578a869 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e578a869 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e578a869 Branch: refs/heads/4.x-HBase-1.3 Commit: e578a869cec2c9136aa6ecfcd60fd46bc4f434f4 Parents: afc6627 Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:25:29 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:25:29 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/e578a869/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index 1828818..66f99ad 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + 
".out").toString()); +StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/e578a869/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 1e5943f7e -> 8c0a76124 PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8c0a7612 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8c0a7612 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8c0a7612 Branch: refs/heads/4.x-HBase-1.2 Commit: 8c0a761247f57f99724a633987c2e2e18bc6ae53 Parents: 1e5943f Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:25:57 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:25:57 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c0a7612/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index 1828818..66f99ad 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + 
".out").toString()); +StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c0a7612/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 b907d373b -> 16a353b35 PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/16a353b3 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/16a353b3 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/16a353b3 Branch: refs/heads/4.x-HBase-1.1 Commit: 16a353b3587205c32915f7a10f0e13800a643998 Parents: b907d37 Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:26:30 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:26:30 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/16a353b3/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index 1828818..66f99ad 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + 
".out").toString()); +StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/16a353b3/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 2a5c0b986 -> 4c31ac3f5 PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4c31ac3f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4c31ac3f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4c31ac3f Branch: refs/heads/4.x-HBase-0.98 Commit: 4c31ac3f5f4099d93885e26ebcd91f1991ebf954 Parents: 2a5c0b9 Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:27:09 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:27:09 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c31ac3f/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index 1828818..66f99ad 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + 
".out").toString()); +StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c31ac3f/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 f640e2dd5 -> b7321c117 PHOENIX-4645 PhoenixStorageHandler doesn't handle correctly data/timestamp in push down predicate when engine is tez.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b7321c11 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b7321c11 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b7321c11 Branch: refs/heads/5.x-HBase-2.0 Commit: b7321c117c6321f6c8cb4627b9c2daaf2526f90c Parents: f640e2d Author: Rajeshbabu Chintaguntla Authored: Tue Apr 24 11:28:15 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Apr 24 11:28:15 2018 +0530 -- .../apache/phoenix/hive/HivePhoenixStoreIT.java | 27 .../PhoenixStorageHandlerConstants.java | 8 +++--- .../phoenix/hive/query/PhoenixQueryBuilder.java | 4 +-- 3 files changed, 33 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7321c11/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java -- diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java index d4e7005..a62d780 100644 --- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java +++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java @@ -309,6 +309,33 @@ public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT { assert (rs.getString(2).equalsIgnoreCase("part1")); assert (rs.getDouble(3) == 200); } +} + +@Test +public void testTimestampPredicate() throws Exception { +String testName = "testTimeStampPredicate"; +hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out")); +createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString()); +createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + 
".out").toString()); +StringBuilder sb = new StringBuilder(); +sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF + +" STORED BY \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil +.CRLF + +" TBLPROPERTIES(" + HiveTestUtil.CRLF + +" 'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF + +" 'phoenix.zookeeper.client.port'='" + +hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF + +" 'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF + +" 'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF); +sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF); +sb.append("SELECT * from timeStampTable WHERE ts between '2013-01-02 01:01:01.123455' and " + +" '2013-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF); + +String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString(); +createFile(sb.toString(), fullPath); +runTest(testName, fullPath); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7321c11/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java -- diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java index 07c374e..e3c7d84 100644 --- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java +++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java @@ -90,13 +90,13 @@ public class PhoenixStorageHandlerConstants { public static final String PATERN_MARKER = "$targetPattern$"; public static final String DATE_PATTERN = "'?\\d{4}-
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 40ff0b95e -> 0675fe545 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0675fe54 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0675fe54 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0675fe54 Branch: refs/heads/master Commit: 0675fe5459dae5bab27934d8d35ec257fe733f3c Parents: 40ff0b9 Author: Rajeshbabu Chintaguntla Authored: Mon May 7 18:56:34 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon May 7 18:56:34 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 8 +- .../UngroupedAggregateRegionObserver.java | 14 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +-- .../hbase/index/write/IndexWriterUtils.java | 27 +--- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../org/apache/phoenix/util/ServerUtil.java | 140 --- 6 files changed, 141 insertions(+), 85 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/0675fe54/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index a887632..71ed20e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; /** * Class to 
encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -45,10 +46,10 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private RegionCoprocessorEnvironment delegate; private HTableFactory tableFactory; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { +this.config = ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); this.delegate = delegate; -this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config); +this.tableFactory = ServerUtil.getDelegateHTableFactory(this, connectionType); } @Override @@ -121,5 +122,4 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn public MetricRegistry getMetricRegistryForRegionServer() { return delegate.getMetricRegistryForRegionServer(); } - } http://git-wip-us.apache.org/repos/asf/phoenix/blob/0675fe54/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 6bee65c..14213f4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ 
-225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); -compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); -// lower the number of rpc retries, so we don't hang the compaction -compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIE
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 6db0cb04d -> 254c0a2ae PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/254c0a2a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/254c0a2a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/254c0a2a Branch: refs/heads/5.x-HBase-2.0 Commit: 254c0a2ae14605402fa8ac7d3402fc587a52102d Parents: 6db0cb0 Author: Rajeshbabu Chintaguntla Authored: Mon May 7 23:02:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon May 7 23:02:43 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 11 +- .../UngroupedAggregateRegionObserver.java | 12 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +--- .../hbase/index/write/IndexWriterUtils.java | 56 +++-- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../stats/DefaultStatisticsCollector.java | 4 +- .../phoenix/schema/stats/StatisticsWriter.java | 4 +- .../org/apache/phoenix/util/ServerUtil.java | 114 ++- 8 files changed, 150 insertions(+), 88 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/254c0a2a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index 7c07068..d8c7a5d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ExtendedCellBuilder; import 
org.apache.hadoop.hbase.RawCellBuilder; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; @@ -31,6 +30,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.regionserver.OnlineRegions; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.phoenix.util.ServerUtil.ConnectionFactory; +import org.apache.phoenix.util.ServerUtil.ConnectionType; /** * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -41,10 +42,12 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private final Configuration config; private RegionCoprocessorEnvironment delegate; +private ConnectionType connectionType; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { this.delegate = delegate; +this.connectionType = connectionType; +this.config = ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); } @Override @@ -109,7 +112,7 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn @Override public Connection getConnection() { -return delegate.getConnection(); +return ConnectionFactory.getConnection(connectionType, delegate); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/254c0a2a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 5743523..7fad5cb 100644 --- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -153,6 +153,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -239,14 +24
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 190e768a6 -> 35d202019 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/35d20201 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/35d20201 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/35d20201 Branch: refs/heads/4.x-HBase-1.1 Commit: 35d2020191e4087632e019475a819b063681a11e Parents: 190e768 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 09:26:41 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 09:26:41 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 7 +- .../UngroupedAggregateRegionObserver.java | 14 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +-- .../hbase/index/write/IndexWriterUtils.java | 27 +--- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../org/apache/phoenix/util/ServerUtil.java | 141 --- 6 files changed, 142 insertions(+), 84 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/35d20201/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index 284d53c..a791f4a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; 
/** * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private RegionCoprocessorEnvironment delegate; private HTableFactory tableFactory; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { +this.config = ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); this.delegate = delegate; -this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config); +this.tableFactory = ServerUtil.getDelegateHTableFactory(this, connectionType); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/35d20201/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 6bee65c..14213f4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); -compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); -// lower the number of rpc retries, so we don't hang the compaction -compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, -QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); -compactionConfig.setInt(HConstants.HBASE_CLIE
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 6d994b02b -> b792c06c5 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b792c06c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b792c06c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b792c06c Branch: refs/heads/4.x-HBase-1.2 Commit: b792c06c5a3805721b7fb5e635c5cbc2d30d0a12 Parents: 6d994b0 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 09:27:16 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 09:27:16 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 7 +- .../UngroupedAggregateRegionObserver.java | 14 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +-- .../hbase/index/write/IndexWriterUtils.java | 27 +--- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../org/apache/phoenix/util/ServerUtil.java | 141 --- 6 files changed, 142 insertions(+), 84 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b792c06c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index 284d53c..a791f4a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; 
/** * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private RegionCoprocessorEnvironment delegate; private HTableFactory tableFactory; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { +this.config = ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); this.delegate = delegate; -this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config); +this.tableFactory = ServerUtil.getDelegateHTableFactory(this, connectionType); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/b792c06c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 6bee65c..14213f4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); -compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); -// lower the number of rpc retries, so we don't hang the compaction -compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, -QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); -compactionConfig.setInt(HConstants.HBASE_CLIE
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 d0f98a020 -> 4082c73ee PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4082c73e Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4082c73e Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4082c73e Branch: refs/heads/4.x-HBase-1.3 Commit: 4082c73ee23d901642d8c5bc45ececfcf5e50ede Parents: d0f98a0 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 12:06:49 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 12:06:49 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 7 +- .../UngroupedAggregateRegionObserver.java | 14 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +-- .../hbase/index/write/IndexWriterUtils.java | 27 +--- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../org/apache/phoenix/util/ServerUtil.java | 141 --- 6 files changed, 142 insertions(+), 84 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/4082c73e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index 284d53c..a791f4a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; 
/** * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private RegionCoprocessorEnvironment delegate; private HTableFactory tableFactory; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { +this.config = ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); this.delegate = delegate; -this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config); +this.tableFactory = ServerUtil.getDelegateHTableFactory(this, connectionType); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/4082c73e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 6bee65c..14213f4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); -compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); -// lower the number of rpc retries, so we don't hang the compaction -compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, -QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); -compactionConfig.setInt(HConstants.HBASE_CLIE
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 3b45df999 -> b09f26b73 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b09f26b7 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b09f26b7 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b09f26b7 Branch: refs/heads/4.x-HBase-0.98 Commit: b09f26b73e839dbb1adcce4b295afbea63c7c309 Parents: 3b45df9 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 16:19:16 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 16:19:16 2018 +0530 -- .../DelegateRegionCoprocessorEnvironment.java | 7 +- .../UngroupedAggregateRegionObserver.java | 12 +- .../org/apache/phoenix/hbase/index/Indexer.java | 19 +-- .../hbase/index/write/IndexWriterUtils.java | 27 +--- .../index/PhoenixTransactionalIndexer.java | 18 +-- .../org/apache/phoenix/util/ServerUtil.java | 139 --- 6 files changed, 138 insertions(+), 84 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b09f26b7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index a3a8bc3..9701f5e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; 
/** * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix coprocessors. Often we @@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn private RegionCoprocessorEnvironment delegate; private HTableFactory tableFactory; -public DelegateRegionCoprocessorEnvironment(Configuration config, RegionCoprocessorEnvironment delegate) { -this.config = config; +public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { +this.config = ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); this.delegate = delegate; -this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config); +this.tableFactory = ServerUtil.getDelegateHTableFactory(this, connectionType); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/b09f26b7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index f3c7679..e8658a6 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -145,6 +145,7 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; +import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -226,14 +227,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); -compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); -// lower the number of rpc retries, so we don't hang the compaction -compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, -QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); -compactionConfig.setInt(HConstants.HBASE_CLIE
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 0675fe545 -> ea06a10a3 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ea06a10a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ea06a10a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ea06a10a Branch: refs/heads/master Commit: ea06a10a35af8eb0773b077cbbe704275bb7869c Parents: 0675fe5 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 20:32:04 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 20:32:04 2018 +0530 -- .../java/org/apache/phoenix/util/ServerUtil.java | 18 +- 1 file changed, 9 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ea06a10a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 1043fd2..d80b6fc 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -27,7 +27,6 @@ import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -345,16 +344,17 @@ public class ServerUtil { new ConcurrentHashMap(); public static ClusterConnection getConnection(final ConnectionType connectionType, final Configuration conf, final HRegionServer server) throws IOException { -return connections.computeIfAbsent(connectionType, new Function() { -@Override -public ClusterConnection apply(ConnectionType t) { -try { -return new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); -} catch (IOException e) { - throw 
new RuntimeException(e); +ClusterConnection connection = null; +if((connection = connections.get(connectionType)) == null) { +synchronized (CoprocessorHConnectionTableFactory.class) { +if(connections.get(connectionType) == null) { +connection = new CoprocessorHConnection(conf, server); +connections.put(connectionType, connection); +return connection; } } -}); +} +return connection; } public static Configuration getTypeSpecificConfiguration(ConnectionType connectionType, Configuration conf) {
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 35d202019 -> 9178f569c PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9178f569 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9178f569 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9178f569 Branch: refs/heads/4.x-HBase-1.1 Commit: 9178f569ca50a3ec90fffd5fa8de46cda9f5cafc Parents: 35d2020 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 20:37:50 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 20:37:50 2018 +0530 -- phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/9178f569/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 5097245..451bc52 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -27,7 +27,6 @@ import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern;
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 b792c06c5 -> 292ec36a3 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/292ec36a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/292ec36a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/292ec36a Branch: refs/heads/4.x-HBase-1.2 Commit: 292ec36a3d8e854e6b0375797cf478e241576aeb Parents: b792c06 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 20:49:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 20:49:43 2018 +0530 -- phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/292ec36a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 2dab076..d80b6fc 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -27,7 +27,6 @@ import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern;
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 4082c73ee -> d38822ed7 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d38822ed Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d38822ed Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d38822ed Branch: refs/heads/4.x-HBase-1.3 Commit: d38822ed7cc6a39578a2423556a036c5d48d8540 Parents: 4082c73 Author: Rajeshbabu Chintaguntla Authored: Tue May 8 20:50:25 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue May 8 20:50:25 2018 +0530 -- phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/d38822ed/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 2dab076..d80b6fc 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -27,7 +27,6 @@ import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; -import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern;
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 292ec36a3 -> f186c3080 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f186c308 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f186c308 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f186c308 Branch: refs/heads/4.x-HBase-1.2 Commit: f186c3080fef2bbbf4ef6c22028daf6c976c55f4 Parents: 292ec36 Author: Rajeshbabu Chintaguntla Authored: Wed May 9 18:11:29 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed May 9 18:11:29 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/f186c308/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index d80b6fc..9190373 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; -import org.jboss.netty.util.internal.ConcurrentHashMap; - @SuppressWarnings("deprecation") public class ServerUtil {
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master ea06a10a3 -> 637cedbd4 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/637cedbd Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/637cedbd Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/637cedbd Branch: refs/heads/master Commit: 637cedbd40b4eacb2189fc3d88b49671dce44eca Parents: ea06a10 Author: Rajeshbabu Chintaguntla Authored: Wed May 9 18:12:42 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed May 9 18:12:42 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/637cedbd/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index d80b6fc..9190373 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; -import org.jboss.netty.util.internal.ConcurrentHashMap; - @SuppressWarnings("deprecation") public class ServerUtil {
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 d38822ed7 -> 7e5c3871d PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7e5c3871 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7e5c3871 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7e5c3871 Branch: refs/heads/4.x-HBase-1.3 Commit: 7e5c3871dc55e00d800c8a2cf17f44d45cb8a194 Parents: d38822e Author: Rajeshbabu Chintaguntla Authored: Wed May 9 18:13:13 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed May 9 18:13:13 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e5c3871/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index d80b6fc..9190373 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; -import org.jboss.netty.util.internal.ConcurrentHashMap; - @SuppressWarnings("deprecation") public class ServerUtil {
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 9178f569c -> 6f52b737d PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6f52b737 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6f52b737 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6f52b737 Branch: refs/heads/4.x-HBase-1.1 Commit: 6f52b737d8902800692f62ded13dffa0355465c9 Parents: 9178f56 Author: Rajeshbabu Chintaguntla Authored: Wed May 9 18:13:38 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed May 9 18:13:38 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/6f52b737/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 451bc52..5d2a94c 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; -import org.jboss.netty.util.internal.ConcurrentHashMap; - @SuppressWarnings("deprecation") public class ServerUtil {
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 0c8349e3c -> 56f109603 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/56f10960 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/56f10960 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/56f10960 Branch: refs/heads/master Commit: 56f109603cec93f3904366d4bb23415981947ae0 Parents: 0c8349e Author: Rajeshbabu Chintaguntla Authored: Fri May 11 23:19:21 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri May 11 23:19:21 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/56f10960/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 9190373..891839a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -346,8 +346,8 @@ public class ServerUtil { ClusterConnection connection = null; if((connection = connections.get(connectionType)) == null) { synchronized (CoprocessorHConnectionTableFactory.class) { -if(connections.get(connectionType) == null) { -connection = new CoprocessorHConnection(conf, server); +if((connection = connections.get(connectionType)) == null) { +connection = new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); connections.put(connectionType, connection); return connection; } @@ -405,7 +405,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { -Configuration clonedConfig = 
PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 72fa8749e -> 39b92bf9e PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/39b92bf9 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/39b92bf9 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/39b92bf9 Branch: refs/heads/4.x-HBase-1.3 Commit: 39b92bf9e8d9cae46b1fa230d91ac04a8e49e629 Parents: 72fa874 Author: Rajeshbabu Chintaguntla Authored: Fri May 11 23:20:52 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri May 11 23:20:52 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/39b92bf9/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 9190373..891839a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -346,8 +346,8 @@ public class ServerUtil { ClusterConnection connection = null; if((connection = connections.get(connectionType)) == null) { synchronized (CoprocessorHConnectionTableFactory.class) { -if(connections.get(connectionType) == null) { -connection = new CoprocessorHConnection(conf, server); +if((connection = connections.get(connectionType)) == null) { +connection = new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); connections.put(connectionType, connection); return connection; } @@ -405,7 +405,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { -Configuration clonedConfig = 
PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 141686ab5 -> 64b77450a PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/64b77450 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/64b77450 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/64b77450 Branch: refs/heads/4.x-HBase-1.2 Commit: 64b77450a8ca45686f7e7d0abf14231dccda2d32 Parents: 141686a Author: Rajeshbabu Chintaguntla Authored: Fri May 11 23:21:55 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri May 11 23:21:55 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/64b77450/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 9190373..891839a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -346,8 +346,8 @@ public class ServerUtil { ClusterConnection connection = null; if((connection = connections.get(connectionType)) == null) { synchronized (CoprocessorHConnectionTableFactory.class) { -if(connections.get(connectionType) == null) { -connection = new CoprocessorHConnection(conf, server); +if((connection = connections.get(connectionType)) == null) { +connection = new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); connections.put(connectionType, connection); return connection; } @@ -405,7 +405,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { -Configuration clonedConfig = 
PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 e6119229c -> c63d563c1 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c63d563c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c63d563c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c63d563c Branch: refs/heads/4.x-HBase-1.1 Commit: c63d563c1b7bb7affbfcd0e81ea23c4a436bf396 Parents: e611922 Author: Rajeshbabu Chintaguntla Authored: Fri May 11 23:23:03 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri May 11 23:23:03 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/c63d563c/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 5d2a94c..66bfca3 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -346,8 +346,8 @@ public class ServerUtil { ClusterConnection connection = null; if((connection = connections.get(connectionType)) == null) { synchronized (CoprocessorHConnectionTableFactory.class) { -if(connections.get(connectionType) == null) { -connection = new CoprocessorHConnection(conf, server); +if((connection = connections.get(connectionType)) == null) { +connection = new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); connections.put(connectionType, connection); return connection; } @@ -405,7 +405,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { -Configuration clonedConfig = 
PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 faacf0469 -> 829f3fcc8 PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/829f3fcc Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/829f3fcc Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/829f3fcc Branch: refs/heads/4.x-HBase-0.98 Commit: 829f3fcc82c6a00910db5b21b85aea90f9f0afbf Parents: faacf04 Author: Rajeshbabu Chintaguntla Authored: Fri May 11 23:39:31 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri May 11 23:39:31 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/829f3fcc/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index 106adb1..fe27ab4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -351,8 +351,8 @@ public class ServerUtil { HConnection connection = null; if((connection = connections.get(connectionType)) == null) { synchronized (CoprocessorHConnectionTableFactory.class) { -if(connections.get(connectionType) == null) { -connection = new CoprocessorHConnection(conf, server); +if((connection = connections.get(connectionType)) == null) { +connection = new CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), server); connections.put(connectionType, connection); return connection; } @@ -410,7 +410,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { -Configuration clonedConfig = 
PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 ded15dc42 -> e05f2bcea PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e05f2bce Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e05f2bce Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e05f2bce Branch: refs/heads/5.x-HBase-2.0 Commit: e05f2bceae498573f124d12e5c570af9ca02cf7e Parents: ded15dc Author: Rajeshbabu Chintaguntla Authored: Sat May 12 03:00:49 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat May 12 03:00:49 2018 +0530 -- .../src/main/java/org/apache/phoenix/util/ServerUtil.java| 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/e05f2bce/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java index cccf1c9..d34514d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -37,9 +37,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -366,7 +364,7 @@ public class ServerUtil { } public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { 
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf); +Configuration clonedConfig = getIndexWriterConnection(conf); setHTableThreads(clonedConfig); return clonedConfig; }
phoenix git commit: PHOENIX-4671 Fix minor size accounting bug for MutationSize(Lars Hofhansl)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 09bd6983c -> 1b2c95094 PHOENIX-4671 Fix minor size accounting bug for MutationSize(Lars Hofhansl) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1b2c9509 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1b2c9509 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1b2c9509 Branch: refs/heads/5.x-HBase-2.0 Commit: 1b2c95094701bf62e1f646ba33d070aba0ac3d94 Parents: 09bd698 Author: Rajeshbabu Chintaguntla Authored: Thu May 24 16:33:15 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu May 24 16:33:15 2018 +0530 -- .../end2end/UpsertSelectAutoCommitIT.java | 28 .../apache/phoenix/execute/MutationState.java | 1 + 2 files changed, 29 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b2c9509/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java index 6b781a0..38d48d6 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java @@ -23,15 +23,19 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.QueryServices; import 
org.apache.phoenix.util.PropertiesUtil; import org.junit.Test; @@ -173,4 +177,28 @@ public class UpsertSelectAutoCommitIT extends ParallelStatsDisabledIT { conn.close(); } +@Test +public void testMaxMutationSize() throws Exception { +Properties connectionProperties = new Properties(); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3"); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "5"); +PhoenixConnection connection = +(PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties); +connection.setAutoCommit(true); +String fullTableName = generateUniqueName(); +try (Statement stmt = connection.createStatement()) { +stmt.execute( +"CREATE TABLE " + fullTableName + " (pk INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER)"); +stmt.execute( +"CREATE SEQUENCE " + fullTableName + "_seq cache 1000"); +stmt.execute("UPSERT INTO " + fullTableName + " VALUES (NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand())"); +} +try (Statement stmt = connection.createStatement()) { +for (int i=0; i<16; i++) { +stmt.execute("UPSERT INTO " + fullTableName + " SELECT NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand() FROM " + fullTableName); +} +} +connection.close(); +} + } http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b2c9509/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java index 1d95f08..1d662ab 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java @@ -1581,6 +1581,7 @@ public class MutationState implements SQLCloseable { public void clear(){ rowKeyToRowMutationState.clear(); +estimatedSize = 0; } public Collection values() {
phoenix git commit: PHOENIX-4778 Fix rat:check failure on 5.x branch(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 abcf0d1ab -> 7ecf47443 PHOENIX-4778 Fix rat:check failure on 5.x branch(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7ecf4744 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7ecf4744 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7ecf4744 Branch: refs/heads/5.x-HBase-2.0 Commit: 7ecf47443b5dcc4c6993463e20f9a13ea8564cb0 Parents: abcf0d1 Author: Rajeshbabu Chintaguntla Authored: Mon Jun 11 12:00:37 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Jun 11 12:00:37 2018 +0530 -- .../apache/phoenix/end2end/MutationStateIT.java| 17 + .../end2end/index/MutableIndexRebuilderIT.java | 17 + 2 files changed, 34 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ecf4744/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java index 2d5f360..36782c1 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ecf4744/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java index a29766f..e1c8f81 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.phoenix.end2end.index; import static org.junit.Assert.assertEquals;
[phoenix] Git Push Summary
Repository: phoenix Updated Tags: refs/tags/v5.0.0-HBase-2.0-rc0 [created] 555bada4e
svn commit: r27384 - in /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0: ./ bin/ src/
Author: rajeshbabu Date: Mon Jun 11 21:09:01 2018 New Revision: 27384 Log: Adding binary and source of 5.0.0 rc0 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz (with props) dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz (with props) dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.md5 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha256 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha512 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz == Binary file - no diff available. 
Propchange: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz -- svn:mime-type = application/octet-stream Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc Mon Jun 11 21:09:01 2018 @@ -0,0 +1,11 @@ +-BEGIN PGP SIGNATURE- +Version: GnuPG v2 + +iQEcBAABCAAGBQJbHrawAAoJEO2Bo2zjpl28qYUIALTHO8qINC6jKVVWgqIw2XAR +Ss2xRzRFd4/aCyCfmI/jGwT78QCnMDjNUaQYEknZfa4xVRK+YBTUesEVCrOdOOkG +FGrU9fjIrielTR4sCVAMxks7CiKH0K0uOgRXzgLNHBYTddgy1eqnLEHmaBLC6T42 +z5FmsZpH1jnF8FeX64ofqeM5V35OKJaVWRQTIirCiYBYAONW4CjSDWX5VfWets93 +XaBgY/m9CvrI2vQTcOBjgMnObrvVmt1TWXNczPRosi0TMHZ9kBfNT5/Pq3n1dgzl +QESr2OmvFjpckW+shKtcQf5OMhDOzHGhetIch0ER8FucZlGv88OZXuhVP4gY7zo= +=p5TV +-END PGP SIGNATURE- Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5 == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5 (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5 Mon Jun 11 21:09:01 2018 @@ -0,0 +1 @@ +MD5(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 2e9a64512ae3022eee0ec7e8215e9afb Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 Mon Jun 11 21:09:01 2018 @@ -0,0 +1 @@ +SHA256(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 254d9067867803d779a3564f6d520e1d04bb2da4acba99ceaa9f5d95c35a10ee Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 Mon Jun 11 21:09:01 2018 @@ -0,0 +1 @@ +SHA512(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= ac10d64016ac839fb82747843c5b098303b99830551da08d1345508f9026b2a55faeae9894e8a7b882e9f6d86446cc95e41c72177c2b2e34e2d9e3b2ea229163 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz == Binary file - no diff available. Propchange: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz -- svn:mime-type = application/octet-stream Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
[phoenix] Git Push Summary
Repository: phoenix Updated Tags: refs/tags/v5.0.0-HBase-2.0-rc0 [deleted] 555bada4e
[phoenix] Git Push Summary
Repository: phoenix Updated Tags: refs/tags/v5.0.0-HBase-2.0-rc0 [created] eb19c9017
svn commit: r27418 - /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/
Author: rajeshbabu Date: Wed Jun 13 08:21:08 2018 New Revision: 27418 Log: Deleting 5.0.0 rc0 Removed: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/
svn commit: r27419 - in /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0: ./ bin/ src/
Author: rajeshbabu Date: Wed Jun 13 08:36:58 2018 New Revision: 27419 Log: Adding binary and source of 5.0.0 rc0 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz (with props) dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz (with props) dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha256 dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha512 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz == Binary file - no diff available. 
Propchange: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz -- svn:mime-type = application/octet-stream Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc Wed Jun 13 08:36:58 2018 @@ -0,0 +1,11 @@ +-BEGIN PGP SIGNATURE- +Version: GnuPG v2 + +iQEcBAABCAAGBQJbINJfAAoJEO2Bo2zjpl28SLsH/jV3xZWY6caHEcg6AYXo21sL ++pC3h4k9lK449bCJwNQSNrabi6KinTRGcTNXGDwOGqp/y1VzZLl9XKSQUXyppPqY +D30ODpRcbuKSPFW7fxc+KQQDBYI16zbphc2u1AsNJIIGxIYHU257tQfVxxW6RR/1 +sDllD6WuTlGIdLqhfD5qhKkG+l6r2jDv6ZucQ0Z4GSMQa45Jm3u7m3uwqdGMpkAB +xC1YirY7EsKM3ei1tqBb9GMv5ZujEfNyaTvD5rfHa3BCgJIuY42fSmcmNN2ZRmEq +/tyrOAIU+OXIimd9o3/4672uwcghCzv5z3dgpK9Oi3ApJFY43L4UNDLJ3triJmg= +=i3s9 +-END PGP SIGNATURE- Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256 Wed Jun 13 08:36:58 2018 @@ -0,0 +1 @@ +SHA256(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 8d6a82196dd9a03dc3f0657daf1c57ea2fd34fa7f2586436bcb84abb5c1212d3 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512 Wed Jun 13 08:36:58 2018 @@ -0,0 +1 @@ +SHA512(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
3ff6f5f6757362e768491c8d97fb1bc1c661f6b9147495a6167ed9a6050afe09f0b746df5a005991359a0f0bb25c69adb966123158a8671e27f157e31ab59f09 Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz == Binary file - no diff available. Propchange: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz -- svn:mime-type = application/octet-stream Added: dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc == --- dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc (added) +++ dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc Wed Jun 13 08:36:58 2018 @@ -0,0 +1,11 @@ +-BEGIN PGP SIGNATURE- +Version: GnuPG v2 + +iQEcBAABCAAGBQJbINJqAAoJEO2Bo2zjpl28UjMH/3tnlH23N2e9M5eb1F9lY+Ml +SGYO8ZawDSIhxnldF8/6VAkWWolp/Kx1o6tXAZ05Idce6zvZT3VG3QZbXfbnOBxt +XZgKUzWzbqtpHVWL0dIK+S04D/0mgDtprgCdDpo8Gs8KULd9jV7f8qBvM0AShjzt +pKNO1JqJ7hAoFVtuRmkPa0k7FUZ4STXTMKkvdkCxMPeGNcWDt29oA8WS4hD61u+z +U7OQD8pJ9t8JwQMRZGTr1thQEbD72u1R/Grk4HjHMam+V8cuC2cp+cMr4fsbCZX7 +f2J1spBk/DYSV+R5plx2im
svn commit: r27420 - /dev/phoenix/KEYS
Author: rajeshbabu Date: Wed Jun 13 08:39:04 2018 New Revision: 27420 Log: Updating KEYS with rajeshb...@apache.org signing key Modified: dev/phoenix/KEYS Modified: dev/phoenix/KEYS == --- dev/phoenix/KEYS (original) +++ dev/phoenix/KEYS Wed Jun 13 08:39:04 2018 @@ -531,3 +531,61 @@ ym3HhetHFTWYHa7PR4f8VqAw4h578hAsNcsgj10f IAHw3/o2ABjphvjf9E8frQ== =gKGe -END PGP PUBLIC KEY BLOCK- +pub 4096R/AAEDBD7B 2018-06-11 +uid Rajeshbabu Chintaguntla (CODE SIGNING KEY) +sig 3AAEDBD7B 2018-06-11 Rajeshbabu Chintaguntla (CODE SIGNING KEY) +sub 4096R/2A1817DB 2018-06-11 +sig AAEDBD7B 2018-06-11 Rajeshbabu Chintaguntla (CODE SIGNING KEY) + +-BEGIN PGP PUBLIC KEY BLOCK- +Version: GnuPG v1 + +mQINBFseLj8BEADgUf2qoeYAE3MzcxEDysTm0fo/qtHJXTvZexVi+w/Xg86yvSuw +WzChpYY8Z04cY+ruXRmwfvsfH//YlquJr02uHavojeqKsOW2kV03czh16ktNWwtS +v7OIU3RzCFt67D2wtMJzbiA9l1WJyzvOx7xnV+uovUNsURYc42YEnwgMJPUQUz0D +4x8SBKBmL31DELZ72PdgsMSNs7xBrvsXqybDlWkFFkTZWQ6ZwGE6L22ddmOT4N8e +0FMxIJRrCPn7xMKF2xUEE05sKw+lxLbCO38gyc42AeEVhP+qCvG3E1ZhvqNe/l1y +LHm4vBNxmR9bgXHf2BvmSBKdGeI2oHM7BQzZ071nH3PHnwr3ksTvNpqW9FkgzjEp +02fv/n9iANp4ZYCphOdm1Ea9iGKvn0M44seoktT2Gh0Eof3hNOsHJcOCmU494w4I +77zeCFHbLEVpCMNDLzHccmBBD5FNoOQRQouTuy8aWXsZeRUQrzLw3fF5KcxEtTaI +SzYKmbGJayeYDpLDvZcU7KZyKm3X+bHWWXlU4sG37hCDfEcQjSwJBXyDmWMpqw7R +FC6pFHCjIXofoNFdY6nMOeQ89B8hSCzgBb5Dh8hxwsGIjSyNF63reJ64EAKAi69s +EAmWx1f3ivi54ZSHhb2mQ/vfdy84xUk8RwYF6WBNmR3I+DRQNKpKDvUxSwARAQAB +tEJSYWplc2hiYWJ1IENoaW50YWd1bnRsYSAoQ09ERSBTSUdOSU5HIEtFWSkgPHJh +amVzaGJhYnVAYXBhY2hlLm9yZz6JAjgEEwECACIFAlseLj8CGwMGCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEDGP2Guq7b1774QQAN2//IV3qQNGoGU2Ihvo3F3n +6YkW8y9qy3+Gz17mNhcBgtxZitf8xdch5JqDh/TU/LpV4z9guxnM3dNuwvZ3tbIw +qog0UTDGoeCe4YokRI0kl19a1rn575PQyYu6TEEJrgTKA/yzz8Ck2UbEJ6gJMkxj +DW5EXFm0QN5Eg+NMrsxcDuf+9hFJ1LagFXZdhPfI9+XzuDDXgZaTXxhd8QQu0d/d +dcrEc1Pnk65CdDHHezdtMvm0WTw+staTO6/bF6QonM+qgpsZaPxtnU3/rMamGT5k +rRG+STpLDdz4EUkk5+ZMSJei7glxv4Nx+16yi2YoAtRgoxyjPKBG8v6GCSWXu2K+ 
+LYoUZmQsnUGgvH2E8V/7XAh68H4/YSTnqhn8TEq5771aya9PzhhvHipSHkWxIkR2 +UjmnJjTkBCXkM4ju4hc/QHFa2J/tvzcAx0WRCUqioE6i/JI72C+nmgRZ4TUYy/nS +tpAH4MSlWCwEWHEKDs85rKIDTL8qAi4X4HAreSsZp8+igYsr0zkhLjAhaJB6qMpB +oek0Ke7O1L4m5AEzMayJzMdqdSKK4X3rzW8Qu7Wc1e+oN3BXEWfdGOYB3Mrssfhp +ZvJEHrTwuMBtb2bAdoiCG5Q/i6mozfzfYaGHd5i7J2ujDdC9Qie/lRyQlwryy1gb +hxGRAScz2xfMa/XNCawCuQINBFseLj8BEAChyrxmBZsR/gKi0Wj6TaLGENMf2mT7 +hiVTnKYU0yAgA1LsDGMVyF+dzwNgLL600LLqrqFMTVqrOiEF3hKLAQ3cjxSE7rwg +0X02ZCOX69Y3+7/xOAMUT93Aqk1WZYUN28uCGO+6Y7Q4oT/V8OZbixSzaIAJ3jyI +jGElDaw+VMYydMXU7Z9c4rhIjjEE3AkKtlB5KGAKHTUYi8GiJhqrmy02jhtHSXT/ +AwAIIxT8gZ5Rgx4MErIjGn7fp+fvbMYK0FbcbreO7flyls6dHwWAGcI5VbxWCCZu +5YZlpo8z/iYkGLB2QV/gtzp7Wm4lZkMX61j+PTpzNO7I6rRQckhyeFF1ZcPK73ey +EmjwWBmo7K/iMPWCYFrU7/ybdXf+TDzuyUYosJUSqG8cIERHBIQc5E4TsdNd3hJR +TPNF9YzXv/iJK9PDO962zLS3cP373/QCiU+Q8rAhqfrX9Mygobsvc1PopPVn66hc +BzwUUuLUY/5du2/hh8BC99BIa3BJ/wxCN7kc1UGdyyffoBB5gcnOC14r7SbJSVNU +ymw1y8UXC2XQQyKFixUCsJfY94ZwqO7cmPxsWBc+DUtL7AP69ZkzYFmPlIwh0f+G +l8/m4ai+tWnxIUhMfyvwyo2E4AJ+AGCfZwUdxB+uCD7AbTy/Sef7vv0zXGTk7brb +am8myY/u0xaF8QARAQABiQIfBBgBAgAJBQJbHi4/AhsMAAoJEDGP2Guq7b17EZMQ +AJAWU7hFSZP1tlwDziRN2FSSd+jHV59oBxzC1MONhT9c0VneodzER/NaK+6N2H7+ +CpQRV6ePzG3iUPj2Wtz8U/eX42Ia8OwnmLM0voJH529ZLPv5tVxqMV+UgbssDMWu +nTeLw/pg4Vy8bec0HHPMwsmooQJQXu+e9/5DoRrepaGMFXu+BpKOQDN8Gvlq0BEA +v9ojEbhRlxflV11VBq57bj1d+5F+JOKexqJGsbZvnY86j7hAY9BIKUKaA4HmhKX2 +vRjM6izxVA0ivJM5X/3qHMakjARrUFnbQkfh+dWQo5NZfQMp5m6uhQwwfhGIYhII +iqk0+zhbgix9DfK7muLdEvE4TxAQO4sc8s/EtEmfHFWVtZA0mf+6dqU9WrigMEq3 +yA1KmQYUjXR+/iax60nd5SacdFKyehhFSAE7dC8Sqp/wT+fkgcYzUEzkiMVqx53n +PXfiDPR3VzC0FwBLgXyJLf3B9lGuICia1wcNc4oKiA68kxc3EPbcYxLtVMKRTvBf +dxN6Siuv6yJzuGJaj/C1fPGVwzbt/x0L7CQw8W5k3XzZlp7bCYyd7AXNZZOvAEQ9 +mE6f5bevE4Bock2Ee72bg391F7sf/bFXAouOiUgyoVHmW2ZSMfLWx6fP8Dq2AmAM +hP9muXjqvjO7SxxLReObjt/gs3HtjFUnJzTX/TlAaMq9 +=rPAq +-END PGP PUBLIC KEY BLOCK-
phoenix git commit: PHOENIX-4528 PhoenixAccessController checks permissions only at table level when creating views(Karan Mehta)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 16fa7f661 -> 61affd431 PHOENIX-4528 PhoenixAccessController checks permissions only at table level when creating views(Karan Mehta) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/61affd43 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/61affd43 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/61affd43 Branch: refs/heads/5.x-HBase-2.0 Commit: 61affd431b8c4a1730804f0c0d5a0035b797e178 Parents: 16fa7f6 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 10:38:05 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 10:38:05 2018 -0700 -- .../phoenix/end2end/BasePermissionsIT.java | 4 + .../phoenix/end2end/ChangePermissionsIT.java| 26 +- .../coprocessor/PhoenixAccessController.java| 95 +--- 3 files changed, 92 insertions(+), 33 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/61affd43/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java index 9f91267..7698fca 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java @@ -748,6 +748,10 @@ public class BasePermissionsIT extends BaseTest { } } +String surroundWithDoubleQuotes(String input) { +return "\"" + input + "\""; +} + void validateAccessDeniedException(AccessDeniedException ade) { String msg = ade.getMessage(); assertTrue("Exception contained unexpected message: '" + msg + "'", http://git-wip-us.apache.org/repos/asf/phoenix/blob/61affd43/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java index 0d764d8..106438f 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.util.SchemaUtil; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -144,7 +145,7 @@ public class ChangePermissionsIT extends BasePermissionsIT { verifyAllowed(createSchema(SCHEMA_NAME), superUser1); verifyAllowed(grantPermissions("C", regularUser1, SCHEMA_NAME, true), superUser1); } else { -verifyAllowed(grantPermissions("C", regularUser1, "\"" + QueryConstants.HBASE_DEFAULT_SCHEMA_NAME + "\"", true), superUser1); +verifyAllowed(grantPermissions("C", regularUser1, surroundWithDoubleQuotes(QueryConstants.HBASE_DEFAULT_SCHEMA_NAME), true), superUser1); } // Create new table. Create indexes, views and view indexes on top of it. 
Verify the contents by querying it @@ -235,7 +236,7 @@ public class ChangePermissionsIT extends BasePermissionsIT { verifyAllowed(createSchema(SCHEMA_NAME), superUser1); verifyAllowed(grantPermissions("C", regularUser1, SCHEMA_NAME, true), superUser1); } else { -verifyAllowed(grantPermissions("C", regularUser1, "\"" + QueryConstants.HBASE_DEFAULT_SCHEMA_NAME + "\"", true), superUser1); +verifyAllowed(grantPermissions("C", regularUser1, surroundWithDoubleQuotes(QueryConstants.HBASE_DEFAULT_SCHEMA_NAME), true), superUser1); } // Create MultiTenant Table (View Index Table should be automatically created) @@ -266,4 +267,25 @@ public class ChangePermissionsIT extends BasePermissionsIT { verifyAllowed(readMultiTenantTableWithIndex(VIEW1_TABLE_NAME, "o1"), regularUser2); verifyAllowed(readMultiTenantTableWithoutIndex(VIEW2_TABLE_NAME, "o2"), regularUser2); } + +/** + * Grant RX permissions on the schema to regularUser1, + * Creating view on a table with that schema by regularUser1 should be allowed + */ +@Test +public
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master c233c15c1 -> 6acdae0ff PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6acdae0f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6acdae0f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6acdae0f Branch: refs/heads/master Commit: 6acdae0ff1a63980f40fe1b794d40ab949cc423d Parents: c233c15 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:38:44 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:38:44 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/6acdae0f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index 86ab275..aefe916 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver { currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at an aggregation 
boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 bfd083396 -> 175fe3fae PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/175fe3fa Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/175fe3fa Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/175fe3fa Branch: refs/heads/4.x-HBase-0.98 Commit: 175fe3fae0577fdc769c8ffbada9a3c2e2d6fb91 Parents: bfd0833 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:39:23 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:39:23 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/175fe3fa/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index f69e30c..cfec0cd 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -531,14 +531,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver { currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at 
an aggregation boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 a6125a3b1 -> 441dfbd27 PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/441dfbd2 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/441dfbd2 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/441dfbd2 Branch: refs/heads/4.x-HBase-1.1 Commit: 441dfbd2707c0b9f4ae9d1d61a1cdd585f90224c Parents: a6125a3 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:39:57 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:39:57 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/441dfbd2/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index 86ab275..aefe916 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver { currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at 
an aggregation boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 cf1a1a683 -> 179bea2c1 PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/179bea2c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/179bea2c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/179bea2c Branch: refs/heads/4.x-HBase-1.2 Commit: 179bea2c186e1286fe7492423751e748d21afc5c Parents: cf1a1a6 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:40:25 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:40:25 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/179bea2c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index 86ab275..aefe916 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver { currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at 
an aggregation boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 6f4a48fe7 -> a0ef6613d PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a0ef6613 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a0ef6613 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a0ef6613 Branch: refs/heads/4.x-HBase-1.3 Commit: a0ef6613dfde647ac9b680744b4628dd2423c33f Parents: 6f4a48f Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:40:51 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:40:51 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/a0ef6613/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index 86ab275..aefe916 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver { currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at 
an aggregation boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 61affd431 -> 8cceea621 PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8cceea62 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8cceea62 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8cceea62 Branch: refs/heads/5.x-HBase-2.0 Commit: 8cceea6214297659e79d89f762c07349f84b74e9 Parents: 61affd4 Author: Rajeshbabu Chintaguntla Authored: Fri Jun 15 15:41:21 2018 -0700 Committer: Rajeshbabu Chintaguntla Committed: Fri Jun 15 15:41:21 2018 -0700 -- .../phoenix/coprocessor/GroupedAggregateRegionObserver.java | 8 1 file changed, 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cceea62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index 1ded543..e58407f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -542,14 +542,6 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver im currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); results.add(keyValue); -if (logger.isInfoEnabled()) { -logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: " -+ keyValue -+ ",for current key " -+ Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), -currentKey.getLength()) + ", aggregated values: " -+ Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan))); -} // If we're at 
an aggregation boundary, reset the // aggregators and // aggregate with the current result (which is not a part of
phoenix git commit: Changing phoenix version in all the modules
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 609465c03 -> 51a34bb89 Changing phoenix version in all the modules Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/51a34bb8 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/51a34bb8 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/51a34bb8 Branch: refs/heads/5.x-HBase-2.0 Commit: 51a34bb89aaeefd81fc1f3d9677341dbf79d47b4 Parents: 609465c Author: Rajeshbabu Chintaguntla Authored: Thu Nov 2 17:05:13 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Nov 2 17:05:13 2017 +0530 -- phoenix-assembly/pom.xml | 2 +- phoenix-client/pom.xml | 2 +- phoenix-core/pom.xml | 2 +- phoenix-flume/pom.xml | 2 +- phoenix-hive/pom.xml | 2 +- phoenix-kafka/pom.xml | 2 +- phoenix-load-balancer/pom.xml | 2 +- phoenix-pherf/pom.xml | 2 +- phoenix-pig/pom.xml| 2 +- phoenix-queryserver-client/pom.xml | 2 +- phoenix-queryserver/pom.xml| 2 +- phoenix-server/pom.xml | 2 +- phoenix-spark/pom.xml | 2 +- phoenix-tracing-webapp/pom.xml | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-assembly/pom.xml -- diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml index ae28514..7528ef9 100644 --- a/phoenix-assembly/pom.xml +++ b/phoenix-assembly/pom.xml @@ -27,7 +27,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-assembly Phoenix Assembly http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-client/pom.xml -- diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml index 648c452..a58e56c 100644 --- a/phoenix-client/pom.xml +++ b/phoenix-client/pom.xml @@ -27,7 +27,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-client Phoenix Client http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-core/pom.xml -- diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml index 
b51f5be..14f6e60 100644 --- a/phoenix-core/pom.xml +++ b/phoenix-core/pom.xml @@ -4,7 +4,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-core Phoenix Core http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-flume/pom.xml -- diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml index 63df1af..536fb76 100644 --- a/phoenix-flume/pom.xml +++ b/phoenix-flume/pom.xml @@ -26,7 +26,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-flume Phoenix - Flume http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-hive/pom.xml -- diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml index b0fd817..cd1f3fe 100644 --- a/phoenix-hive/pom.xml +++ b/phoenix-hive/pom.xml @@ -27,7 +27,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-hive Phoenix - Hive http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-kafka/pom.xml -- diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml index 47da23c..e2676974 100644 --- a/phoenix-kafka/pom.xml +++ b/phoenix-kafka/pom.xml @@ -26,7 +26,7 @@ org.apache.phoenix phoenix - 4.13.0-SNAPSHOT + 5.0.0-SNAPSHOT phoenix-kafka Phoenix - Kafka http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-load-balancer/pom.xml -- diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml index b682140..d8a47e9 100644 --- a/phoenix-load-balancer/pom.xml +++ b/phoenix-load-balancer/pom.xml @@ -27,7 +27,7 @@ org.apache.phoenix phoenix -4.13.0-SNAPSHOT +5.0.0-SNAPSHOT phoenix-load-balancer Phoenix Load Balancer http://git-wip-us.apache.org/repos/asf/phoenix/blob/51a34bb8/phoenix-pherf/pom.xml -- diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml index 8368c45..a009a23 100644 --- a/phoenix-pherf/pom.xml +++ b/phoenix-pherf/pom.xml @@ -15,7 +1
[3/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively(Rajeshbabu)
PHOENIX-4303 Replace HTableInterface,HConnection with Table,Connection interfaces respectively(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/11390427 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/11390427 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/11390427 Branch: refs/heads/5.x-HBase-2.0 Commit: 113904275d0689755aea100aaeb43aed9bd9cc9d Parents: 136c7a6 Author: Rajeshbabu Chintaguntla Authored: Wed Nov 8 16:07:14 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Nov 8 16:07:14 2017 +0530 -- .../phoenix/end2end/AlterTableWithViewsIT.java | 4 +- .../org/apache/phoenix/end2end/BaseViewIT.java | 4 +- .../end2end/ColumnProjectionOptimizationIT.java | 4 +- .../phoenix/end2end/DisableLocalIndexIT.java| 4 +- .../apache/phoenix/end2end/DynamicColumnIT.java | 6 +- .../apache/phoenix/end2end/DynamicFamilyIT.java | 6 +- .../phoenix/end2end/MappingTableDataTypeIT.java | 9 +- .../phoenix/end2end/MetaDataEndPointIT.java | 4 +- .../phoenix/end2end/NativeHBaseTypesIT.java | 10 +- .../phoenix/end2end/PhoenixRuntimeIT.java | 16 +- .../end2end/QueryDatabaseMetaDataIT.java| 4 +- .../end2end/RebuildIndexConnectionPropsIT.java | 10 +- .../phoenix/end2end/StatsCollectorIT.java | 8 +- .../UpdateCacheAcrossDifferentClientsIT.java| 4 +- .../org/apache/phoenix/end2end/UpgradeIT.java | 7 +- .../apache/phoenix/end2end/UpsertValuesIT.java | 4 +- .../phoenix/end2end/index/BaseIndexIT.java | 4 +- .../phoenix/end2end/index/LocalIndexIT.java | 4 +- .../phoenix/end2end/index/MutableIndexIT.java | 4 +- .../end2end/index/PartialIndexRebuilderIT.java | 36 ++-- .../phoenix/tx/FlappingTransactionIT.java | 4 +- .../phoenix/tx/ParameterizedTransactionIT.java | 8 +- .../apache/phoenix/cache/ServerCacheClient.java | 9 +- .../apache/phoenix/compile/FromCompiler.java| 4 +- .../DelegateRegionCoprocessorEnvironment.java | 15 +- .../coprocessor/MetaDataEndpointImpl.java | 14 +- 
.../coprocessor/MetaDataRegionObserver.java | 6 +- .../apache/phoenix/execute/DelegateHTable.java | 185 --- .../apache/phoenix/execute/MutationState.java | 18 +- .../index/table/CoprocessorHTableFactory.java | 6 +- .../hbase/index/table/HTableFactory.java| 6 +- .../hbase/index/write/IndexWriterUtils.java | 19 +- .../write/ParallelWriterIndexCommitter.java | 4 +- .../TrackingParallelWriterIndexCommitter.java | 4 +- .../index/PhoenixIndexFailurePolicy.java| 6 +- .../index/PhoenixTransactionalIndexer.java | 2 +- .../phoenix/iterate/BaseResultIterators.java| 2 +- .../apache/phoenix/iterate/SnapshotScanner.java | 4 +- .../phoenix/iterate/TableResultIterator.java| 6 +- .../phoenix/mapreduce/PhoenixRecordReader.java | 3 +- .../mapreduce/index/DirectHTableWriter.java | 13 +- .../phoenix/mapreduce/index/IndexTool.java | 8 +- .../phoenix/query/ConnectionQueryServices.java | 7 +- .../query/ConnectionQueryServicesImpl.java | 65 +++ .../query/ConnectionlessQueryServicesImpl.java | 6 +- .../query/DelegateConnectionQueryServices.java | 7 +- .../apache/phoenix/query/GuidePostsCache.java | 4 +- .../phoenix/query/HConnectionFactory.java | 10 +- .../org/apache/phoenix/query/HTableFactory.java | 11 +- .../stats/DefaultStatisticsCollector.java | 4 +- .../phoenix/schema/stats/StatisticsUtil.java| 4 +- .../phoenix/schema/stats/StatisticsWriter.java | 12 +- .../transaction/OmidTransactionTable.java | 34 +--- .../transaction/PhoenixTransactionalTable.java | 22 +-- .../transaction/TephraTransactionTable.java | 101 +- .../phoenix/transaction/TransactionFactory.java | 6 +- .../java/org/apache/phoenix/util/IndexUtil.java | 19 +- .../org/apache/phoenix/util/MetaDataUtil.java | 14 +- .../org/apache/phoenix/util/ServerUtil.java | 21 +-- .../apache/phoenix/util/TransactionUtil.java| 4 +- .../org/apache/phoenix/util/UpgradeUtil.java| 93 +- .../hbase/index/write/FakeTableFactory.java | 12 +- .../hbase/index/write/TestIndexWriter.java | 8 +- .../index/write/TestParalleIndexWriter.java | 12 +- 
.../write/TestParalleWriterIndexCommitter.java | 12 +- .../java/org/apache/phoenix/util/TestUtil.java | 11 +- .../hive/mapreduce/PhoenixInputFormat.java | 7 +- 67 files changed, 488 insertions(+), 516 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java -- diff --git a/phoenix-core/src
[2/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively(Rajeshbabu)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java index f45b356..444bb5d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java @@ -22,13 +22,13 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.coprocessor.Batch.Call; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -46,19 +48,14 @@ import com.google.protobuf.Message; import com.google.protobuf.Service; import com.google.protobuf.ServiceException; -public class DelegateHTable implements HTableInterface { -protected final HTableInterface delegate; +public class DelegateHTable implements Table { +protected final Table delegate; -public DelegateHTable(HTableInterface delegate) { 
+public DelegateHTable(Table delegate) { this.delegate = delegate; } @Override -public byte[] getTableName() { -return delegate.getTableName(); -} - -@Override public TableName getName() { return delegate.getName(); } @@ -79,34 +76,22 @@ public class DelegateHTable implements HTableInterface { } @Override -public Boolean[] exists(List gets) throws IOException { -return delegate.exists(gets); +public boolean[] existsAll(List gets) throws IOException { +return delegate.existsAll(gets); } @Override -public void batch(List actions, Object[] results) throws IOException, InterruptedException { +public void batch(List actions, Object[] results) throws IOException, +InterruptedException { delegate.batch(actions, results); } -@SuppressWarnings("deprecation") -@Override -public Object[] batch(List actions) throws IOException, InterruptedException { -return delegate.batch(actions); -} - @Override -public void batchCallback(List actions, Object[] results, Callback callback) -throws IOException, InterruptedException { +public void batchCallback(List actions, Object[] results, +Callback callback) throws IOException, InterruptedException { delegate.batchCallback(actions, results, callback); } -@SuppressWarnings("deprecation") -@Override -public Object[] batchCallback(List actions, Callback callback) throws IOException, -InterruptedException { -return delegate.batchCallback(actions, callback); -} - @Override public Result get(Get get) throws IOException { return delegate.get(get); @@ -117,12 +102,6 @@ public class DelegateHTable implements HTableInterface { return delegate.get(gets); } -@SuppressWarnings("deprecation") -@Override -public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { -return delegate.getRowOrBefore(row, family); -} - @Override public ResultScanner getScanner(Scan scan) throws IOException { return delegate.getScanner(scan); @@ -149,11 +128,18 @@ public class DelegateHTable implements HTableInterface { } @Override -public boolean 
checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException { +public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) +throws IOException { return delegate.checkAndPut(row, family, qualifier, value, put); } @Override +public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, +byte[] value, Put put) throws IOException { +return delegate.checkAndPut(row, family, qualifi
[1/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 136c7a629 -> 113904275 http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java index c191d8d..ede2896 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java @@ -22,13 +22,13 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.coprocessor.Batch.Call; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -56,11 +58,11 @@ public class TephraTransactionTable implements PhoenixTransactionalTable { private TephraTransactionContext tephraTransactionContext; -public TephraTransactionTable(PhoenixTransactionContext ctx, 
HTableInterface hTable) { +public TephraTransactionTable(PhoenixTransactionContext ctx, Table hTable) { this(ctx, hTable, null); } -public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable, PTable pTable) { +public TephraTransactionTable(PhoenixTransactionContext ctx, Table hTable, PTable pTable) { assert(ctx instanceof TephraTransactionContext); @@ -171,32 +173,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable { transactionAwareHTable.close(); } -@Override -public long incrementColumnValue(byte[] row, byte[] family, -byte[] qualifier, long amount, boolean writeToWAL) -throws IOException { -return transactionAwareHTable.incrementColumnValue(row, family, qualifier, amount, writeToWAL); -} - -@Override -public Boolean[] exists(List gets) throws IOException { -return transactionAwareHTable.exists(gets); -} - -@Override -public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { -transactionAwareHTable.setAutoFlush(autoFlush, clearBufferOnFail); -} - -@Override -public void setAutoFlushTo(boolean autoFlush) { -transactionAwareHTable.setAutoFlush(autoFlush); -} - -@Override -public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { -return transactionAwareHTable.getRowOrBefore(row, family); -} @Override public TableName getName() { @@ -215,12 +191,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable { } @Override -public Object[] batch(List actions) throws IOException, -InterruptedException { -return transactionAwareHTable.batch(actions); -} - -@Override public void batchCallback(List actions, Object[] results, Callback callback) throws IOException, InterruptedException { @@ -228,12 +198,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable { } @Override -public Object[] batchCallback(List actions, -Callback callback) throws IOException, InterruptedException { -return transactionAwareHTable.batchCallback(actions, callback); -} - 
-@Override public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException { return transactionAwareHTable.checkAndPut(row, family, qualifier, value, put); @@ -329,22 +293,65 @@ public class TephraTransactionTable implements PhoenixTransactionalTable { } @Override -public void setOperationTimeout(int i) { -//transactionAwareHTable.setOperationTimeout(i); +public void setOperationTimeout(int operationTimeout) { +transactionAwareHTable.setOperationTimeout(operationTimeou
phoenix git commit: PHOENIX-4350 Replace deprecated or changed Region methods with new APIs(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 113904275 -> 62027bff1 PHOENIX-4350 Replace deprecated or changed Region methods with new APIs(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/62027bff Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/62027bff Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/62027bff Branch: refs/heads/5.x-HBase-2.0 Commit: 62027bff132dda23e6f7ae30334f191e68072ba2 Parents: 1139042 Author: Rajeshbabu Chintaguntla Authored: Wed Nov 8 17:05:31 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Nov 8 17:05:31 2017 +0530 -- ...ReplayWithIndexWritesAndCompressedWALIT.java | 8 ++- .../DataTableLocalIndexRegionScanner.java | 3 +- .../coprocessor/MetaDataEndpointImpl.java | 57 .../coprocessor/SequenceRegionObserver.java | 21 +++- .../UngroupedAggregateRegionObserver.java | 30 ++- .../hbase/index/IndexRegionSplitPolicy.java | 25 - .../org/apache/phoenix/hbase/index/Indexer.java | 3 +- .../stats/DefaultStatisticsCollector.java | 2 +- .../java/org/apache/phoenix/util/IndexUtil.java | 4 +- .../org/apache/phoenix/util/ServerUtil.java | 20 +++ pom.xml | 2 +- 11 files changed, 85 insertions(+), 90 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/62027bff/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index 5ca6de9..dfff8fe 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -183,15 +183,14 @@ public 
class WALReplayWithIndexWritesAndCompressedWALIT { CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder(); builder.addIndexGroup(fam1); builder.build(htd); +WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234"); +WAL wal = createWAL(this.conf, walFactory); // create the region + its WAL -HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); // FIXME: Uses private type +HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type region0.close(); region0.getWAL().close(); -WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234"); - -WAL wal = createWAL(this.conf, walFactory); HRegionServer mockRS = Mockito.mock(HRegionServer.class); // mock out some of the internals of the RSS, so we can run CPs when(mockRS.getWAL(null)).thenReturn(wal); @@ -202,7 +201,6 @@ public class WALReplayWithIndexWritesAndCompressedWALIT { when(mockRS.getServerName()).thenReturn(mockServerName); HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS)); region.initialize(); -when(region.getSequenceId()).thenReturn(0l); //make an attempted write to the primary that should also be indexed http://git-wip-us.apache.org/repos/asf/phoenix/blob/62027bff/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java -- diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java index 64d4ac4..eee6c93 100644 --- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java +++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java @@ -94,8 +94,7 @@ public class DataTableLocalIndexRegionScanner extends DelegateRegionScanner { boolean next = 
super.next(dataTableResults); addMutations(dataTableResults); if (ServerUtil.readyToCommit(mutationList.size(), mutationList.byteSize(), maxBatchSize, maxBatchSizeBytes)||!next) { -region.batchMutate(mutationList.toArray(new Mutation[mutationList.size()]), HConstants.NO_NONCE, -HConstants.NO_NONCE); +region.batchMutate(mutationList.toArray(new Mut
phoenix git commit: PHOENIX-4303 Remove HTable and Use Table APIs(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 62027bff1 -> d85e9165a PHOENIX-4303 Remove HTable and Use Table APIs(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d85e9165 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d85e9165 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d85e9165 Branch: refs/heads/5.x-HBase-2.0 Commit: d85e9165a7113449efb30cc9ab645e51da89629d Parents: 62027bf Author: Rajeshbabu Chintaguntla Authored: Thu Nov 9 14:01:08 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Nov 9 14:01:08 2017 +0530 -- .../wal/WALRecoveryRegionPostOpenIT.java| 13 ++ ...ReplayWithIndexWritesAndCompressedWALIT.java | 9 --- .../phoenix/end2end/AggregateQueryIT.java | 20 ++- .../phoenix/end2end/FlappingLocalIndexIT.java | 7 +++--- .../end2end/NamespaceSchemaMappingIT.java | 8 +++--- .../apache/phoenix/end2end/RowTimestampIT.java | 14 +++ .../apache/phoenix/end2end/StoreNullsIT.java| 9 --- .../org/apache/phoenix/end2end/UseSchemaIT.java | 6 +++-- .../phoenix/end2end/index/DropColumnIT.java | 18 +++--- .../index/IndexWithTableSchemaChangeIT.java | 6 ++--- .../phoenix/end2end/index/LocalIndexIT.java | 14 ++- .../index/MutableIndexReplicationIT.java| 8 +++--- ...erRegionServerIndexRpcControllerFactory.java | 3 +-- ...egionServerMetadataRpcControllerFactory.java | 3 +-- .../IndexHalfStoreFileReaderGenerator.java | 8 +++--- .../UngroupedAggregateRegionObserver.java | 1 - .../apache/phoenix/execute/DelegateHTable.java | 26 .../phoenix/mapreduce/AbstractBulkLoadTool.java | 25 --- .../mapreduce/MultiHfileOutputFormat.java | 20 +-- .../phoenix/mapreduce/index/IndexTool.java | 25 --- .../transaction/PhoenixTransactionalTable.java | 10 .../phoenix/hbase/index/IndexTestingUtils.java | 10 .../index/write/TestWALRecoveryCaching.java | 11 + 23 files changed, 174 insertions(+), 100 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d85e9165/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java index d74ddb2..20d59a7 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java @@ -44,12 +44,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver; @@ -192,7 +194,8 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest { this.assertRegionServerDifferent(miniHBaseCluster); Scan scan = new Scan(); -HTable primaryTable = new HTable(getUtility().getConfiguration(), DATA_TABLE_NAME); +org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(getUtility().getConfiguration()); +Table primaryTable = hbaseConn.getTable(TableName.valueOf(DATA_TABLE_NAME)); ResultScanner resultScanner = primaryTable.getScanner(scan); int count = 0; for (Result result : 
resultScanner) { @@ -244,7 +247,7 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest { // the index table is one row -HTable indexTable = new HTable(getUtility().getConfiguration(), INDEX_TABLE_NAME); +Table indexTable = hbaseConn.getTable(TableName.valueOf(INDEX_TABLE_NAME)); resultScanner = indexTable.getScanner(scan); count = 0; for (Result result : resultScan
phoenix git commit: PHOENIX-4303 Fixing getting table from RegionCoprocessorEnvironment(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 d85e9165a -> 754201cfb PHOENIX-4303 Fixing getting table from RegionCoprocessorEnvironment(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/754201cf Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/754201cf Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/754201cf Branch: refs/heads/5.x-HBase-2.0 Commit: 754201cfb982ae9ec7f0e2cb0a0e3ecb8d1ca4a7 Parents: d85e916 Author: Rajeshbabu Chintaguntla Authored: Thu Nov 9 17:12:19 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Nov 9 17:12:19 2017 +0530 -- .../phoenix/end2end/index/LocalIndexIT.java | 14 +++-- .../IndexHalfStoreFileReaderGenerator.java | 2 +- .../DelegateRegionCoprocessorEnvironment.java | 61 +--- .../coprocessor/MetaDataRegionObserver.java | 7 ++- .../index/table/CoprocessorHTableFactory.java | 9 +-- .../hbase/index/write/IndexWriterUtils.java | 3 +- .../write/ParallelWriterIndexCommitter.java | 2 +- .../TrackingParallelWriterIndexCommitter.java | 2 +- .../index/PhoenixIndexFailurePolicy.java| 2 +- .../index/PhoenixTransactionalIndexer.java | 3 +- .../phoenix/mapreduce/AbstractBulkLoadTool.java | 1 - .../query/ConnectionQueryServicesImpl.java | 7 ++- .../stats/DefaultStatisticsCollector.java | 2 +- .../phoenix/schema/stats/StatisticsWriter.java | 2 +- .../transaction/PhoenixTransactionalTable.java | 1 - .../java/org/apache/phoenix/util/IndexUtil.java | 2 +- .../org/apache/phoenix/util/ServerUtil.java | 2 +- 17 files changed, 73 insertions(+), 49 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/754201cf/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java index 615d2aa..238b88e 100644 --- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java @@ -223,8 +223,11 @@ public class LocalIndexIT extends BaseLocalIndexIT { assertTrue(rs.next()); assertEquals(4, rs.getInt(1)); HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); -HTable indexTable = new HTable(admin.getConfiguration(), indexPhysicalTableName); -Pair startEndKeys = indexTable.getStartEndKeys(); +Table indexTable = + admin.getConnection().getTable(TableName.valueOf(indexPhysicalTableName)); +Pair startEndKeys = + admin.getConnection().getRegionLocator(TableName.valueOf(indexPhysicalTableName)) +.getStartEndKeys(); byte[][] startKeys = startEndKeys.getFirst(); byte[][] endKeys = startEndKeys.getSecond(); for (int i = 0; i < startKeys.length; i++) { @@ -423,8 +426,11 @@ public class LocalIndexIT extends BaseLocalIndexIT { conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)"); conn1.createStatement().execute("DROP INDEX " + indexName + " ON " + tableName); HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); -HTable table = new HTable(admin.getConfiguration() ,TableName.valueOf(tableName)); -Pair startEndKeys = table.getStartEndKeys(); +Table table = + admin.getConnection().getTable(TableName.valueOf(tableName)); +Pair startEndKeys = + admin.getConnection().getRegionLocator(TableName.valueOf(tableName)) +.getStartEndKeys(); byte[][] startKeys = startEndKeys.getFirst(); byte[][] endKeys = startEndKeys.getSecond(); // No entry should be present in local index table after drop index. 
http://git-wip-us.apache.org/repos/asf/phoenix/blob/754201cf/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java -- diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java index 992e65f..a50d5ce 100644 --- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGen
[3/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java index c004818..68b36f5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java @@ -52,7 +52,7 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.apache.phoenix.util.KeyValueUtil; +import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.SequenceUtil; import org.apache.phoenix.util.ServerUtil; @@ -84,7 +84,7 @@ public class SequenceRegionObserver implements RegionObserver { byte[] errorCodeBuf = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(errorCode, errorCodeBuf, 0); return Result.create(Collections.singletonList( -(Cell)KeyValueUtil.newKeyValue(row, +PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf))); } @@ -139,9 +139,9 @@ public class SequenceRegionObserver implements RegionObserver { } -KeyValue currentValueKV = Sequence.getCurrentValueKV(result); -KeyValue incrementByKV = Sequence.getIncrementByKV(result); -KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result); +Cell currentValueKV = Sequence.getCurrentValueKV(result); +Cell incrementByKV = Sequence.getIncrementByKV(result); +Cell cacheSizeKV = Sequence.getCacheSizeKV(result); long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), 
currentValueKV.getValueOffset(), SortOrder.getDefault()); long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault()); @@ -161,15 +161,15 @@ public class SequenceRegionObserver implements RegionObserver { currentValue += incrementBy * cacheSize; // Hold timestamp constant for sequences, so that clients always only see the latest value // regardless of when they connect. -KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp); +Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp); put.add(newCurrentValueKV); Sequence.replaceCurrentValueKV(cells, newCurrentValueKV); } else { - KeyValue cycleKV = Sequence.getCycleKV(result); - KeyValue limitReachedKV = Sequence.getLimitReachedKV(result); - KeyValue minValueKV = Sequence.getMinValueKV(result); - KeyValue maxValueKV = Sequence.getMaxValueKV(result); + Cell cycleKV = Sequence.getCycleKV(result); + Cell limitReachedKV = Sequence.getLimitReachedKV(result); + Cell minValueKV = Sequence.getMinValueKV(result); + Cell maxValueKV = Sequence.getMaxValueKV(result); boolean increasingSeq = incrementBy > 0 ? true : false; @@ -179,7 +179,7 @@ public class SequenceRegionObserver implements RegionObserver { boolean limitReached; if (limitReachedKV == null) { limitReached = false; - KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); + Cell newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); put.add(newLimitReachedKV); Sequence.replaceLimitReachedKV(cells, newLimitReachedKV); } @@ -190,7 +190,7 @@ public class SequenceRegionObserver implements RegionObserver { long minValue; if (minValueKV == null)
[1/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 0454e4211 -> c82cc18d8 http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java -- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java index 3a6de6a..56080f8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java @@ -25,7 +25,9 @@ import java.util.HashSet; import java.util.Set; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.util.Bytes; @@ -90,8 +92,7 @@ public class TestApplyAndFilterDeletesFilter { byte[] laterFamily = Bytes.toBytes("zfamily"); filter = new ApplyAndFilterDeletesFilter(asSet(laterFamily)); assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv)); -@SuppressWarnings("deprecation") -KeyValue expected = KeyValue.createFirstOnRow(kv.getRow(), laterFamily, new byte[0]); +KeyValue expected = KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]); assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT, filter.filterKeyValue(next)); assertEquals("Didn't get correct next key with a next family", expected, http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java -- diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java index dbf67fc..0204cd1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java @@ -34,8 +34,9 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -123,14 +124,14 @@ public class IndexMaintainerTest extends BaseConnectionlessQueryTest { stmt.setObject(i+1, values[i]); } stmt.execute(); - Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); -List dataKeyValues = iterator.next().getSecond(); + Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); +List dataKeyValues = iterator.next().getSecond(); Map valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size()); -byte[] row = dataKeyValues.get(0).getRow(); - ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row); -Put dataMutation = new Put(rowKeyPtr.copyBytes()); -for (KeyValue kv : dataKeyValues) { -valueMap.put(new ColumnReference(kv.getFamily(),kv.getQualifier()), kv.getValue()); + ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRowArray(), dataKeyValues.get(0).getRowOffset(), dataKeyValues.get(0).getRowLength()); +byte[] row = rowKeyPtr.copyBytes(); +Put dataMutation = new Put(row); +for (Cell kv : dataKeyValues) { +valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), CellUtil.cloneValue(kv)); 
dataMutation.add(kv); } ValueGetter valueGetter = newValueGetter(row, valueMap); @@ -148,7 +149,7 @@ public class IndexMaintainerTest extends BaseConnectionlessQueryTest { valueMap.get(ref); } byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null); -assertArrayEquals(dataRowKey, dataKeyValues.get(0).getRow()); +assertArrayEquals(dataRowKey, CellUtil.cloneRow(dataKeyValues.get(0))); } finally { try { conn.createStatement().execute("DROP TAB
[4/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu)
PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c82cc18d Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c82cc18d Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c82cc18d Branch: refs/heads/5.x-HBase-2.0 Commit: c82cc18d8432baba8e2cbd10af121cd39f83ca05 Parents: 0454e42 Author: Rajeshbabu Chintaguntla Authored: Tue Nov 14 10:00:00 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Nov 14 10:00:00 2017 +0530 -- ...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +- .../phoenix/end2end/MappingTableDataTypeIT.java | 6 +- .../apache/phoenix/end2end/RowTimestampIT.java | 8 +- .../phoenix/end2end/index/DropColumnIT.java | 6 +- .../phoenix/end2end/index/ImmutableIndexIT.java | 4 +- .../phoenix/end2end/index/IndexTestUtil.java| 4 +- .../DataTableLocalIndexRegionScanner.java | 3 +- .../regionserver/IndexHalfStoreFileReader.java | 4 +- .../IndexHalfStoreFileReaderGenerator.java | 5 +- .../regionserver/IndexKeyValueSkipListSet.java | 16 +- .../hbase/regionserver/KeyValueSkipListSet.java | 57 ++--- .../LocalIndexStoreFileScanner.java | 24 +- .../regionserver/wal/IndexedWALEditCodec.java | 6 +- .../phoenix/cache/aggcache/SpillManager.java| 7 +- .../cache/aggcache/SpillableGroupByCache.java | 4 +- .../phoenix/compile/ListJarsQueryPlan.java | 7 +- .../MutatingParallelIteratorFactory.java| 6 +- .../apache/phoenix/compile/TraceQueryPlan.java | 11 +- .../GroupedAggregateRegionObserver.java | 13 +- .../coprocessor/MetaDataEndpointImpl.java | 215 +--- .../coprocessor/SequenceRegionObserver.java | 46 ++-- .../UngroupedAggregateRegionObserver.java | 16 +- .../apache/phoenix/execute/MutationState.java | 9 +- .../phoenix/execute/SortMergeJoinPlan.java | 6 +- .../apache/phoenix/execute/TupleProjector.java | 17 +- .../phoenix/filter/DistinctPrefixFilter.java| 4 +- .../hbase/index/builder/BaseIndexBuilder.java | 
4 +- .../hbase/index/builder/IndexBuilder.java | 4 +- .../phoenix/hbase/index/covered/Batch.java | 9 +- .../hbase/index/covered/KeyValueStore.java | 6 +- .../hbase/index/covered/LocalTableState.java| 29 +-- .../hbase/index/covered/NonTxIndexBuilder.java | 5 +- .../phoenix/hbase/index/covered/TableState.java | 4 +- .../hbase/index/covered/data/IndexMemStore.java | 53 ++-- .../index/covered/data/LocalHBaseState.java | 4 +- .../filter/ApplyAndFilterDeletesFilter.java | 21 +- .../covered/filter/MaxTimestampFilter.java | 11 +- .../index/covered/update/ColumnReference.java | 3 +- .../index/scanner/FilteredKeyValueScanner.java | 4 +- .../index/util/GenericKeyValueBuilder.java | 7 +- .../hbase/index/util/IndexManagementUtil.java | 10 +- .../hbase/index/util/KeyValueBuilder.java | 6 +- .../hbase/index/wal/IndexedKeyValue.java| 10 +- .../phoenix/hbase/index/wal/KeyValueCodec.java | 2 - .../apache/phoenix/index/IndexMaintainer.java | 9 +- .../phoenix/index/PhoenixIndexBuilder.java | 6 +- .../index/PhoenixTransactionalIndexer.java | 8 +- .../BaseGroupedAggregatingResultIterator.java | 8 +- .../GroupedAggregatingResultIterator.java | 4 +- .../iterate/MappedByteBufferSortedQueue.java| 5 +- .../NonAggregateRegionScannerFactory.java | 4 +- .../RowKeyOrderedAggregateResultIterator.java | 4 +- .../UngroupedAggregatingResultIterator.java | 4 +- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 7 +- .../apache/phoenix/jdbc/PhoenixStatement.java | 10 +- .../mapreduce/FormatToBytesWritableMapper.java | 24 +- .../ImportPreUpsertKeyValueProcessor.java | 3 +- .../mapreduce/MultiHfileOutputFormat.java | 3 +- .../index/PhoenixIndexImportMapper.java | 12 +- .../index/PhoenixIndexPartialBuildMapper.java | 2 +- .../org/apache/phoenix/schema/PTableImpl.java | 1 - .../org/apache/phoenix/schema/Sequence.java | 92 +++ .../stats/DefaultStatisticsCollector.java | 10 +- .../schema/tuple/MultiKeyValueTuple.java| 4 +- .../schema/tuple/PositionBasedResultTuple.java | 10 +- 
.../phoenix/schema/tuple/ResultTuple.java | 12 +- .../java/org/apache/phoenix/util/IndexUtil.java | 31 +-- .../org/apache/phoenix/util/KeyValueUtil.java | 238 -- .../org/apache/phoenix/util/MetaDataUtil.java | 2 +- .../phoenix/util/PhoenixKeyValueUtil.java | 245 +++ .../org/apache/phoenix/util/PhoenixRuntime.java | 19 +- .../org/apache/phoenix/util/ResultUtil.java | 12 +- .../java/org/apache/phoenix/util/TupleUtil.java | 5 +- .../org/apache/phoenix/util
[2/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java index dff9ef2..22d40d4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.mapreduce; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import java.util.List; @@ -43,6 +44,6 @@ public interface ImportPreUpsertKeyValueProcessor { * @param keyValues list of KeyValues that are to be written to an HFile * @return the list that will actually be written */ -List preUpsert(byte[] rowKey, List keyValues); +List preUpsert(byte[] rowKey, List keyValues); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java index c888b7d..bb38923 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java @@ -72,6 +72,7 @@ import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair; import org.apache.phoenix.mapreduce.bulkload.TargetTableRef; import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions; import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -144,7 +145,7 @@ public class 
MultiHfileOutputFormat extends FileOutputFormathttp://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java index 9e0d629..6f469e6 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Properties; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -37,6 +38,7 @@ import org.apache.phoenix.mapreduce.PhoenixJobCounters; import org.apache.phoenix.mapreduce.util.ConnectionUtil; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.util.ColumnInfo; +import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,18 +102,18 @@ public class PhoenixIndexImportMapper extends Mapper>> uncommittedDataIterator = PhoenixRuntime.getUncommittedDataIterator(connection, true); + final Iterator>> uncommittedDataIterator = PhoenixRuntime.getUncommittedDataIterator(connection, true); while (uncommittedDataIterator.hasNext()) { -Pair> kvPair = uncommittedDataIterator.next(); +Pair> kvPair = uncommittedDataIterator.next(); if (Bytes.compareTo(Bytes.toBytes(indexTableName), kvPair.getFirst()) != 0) { // skip edits for other tables continue; } -List keyValueList = kvPair.getSecond(); +List keyValueList = kvPair.getSecond(); keyValueList = preUpdateProcessor.preUpsert(kvPair.getFirst(), 
keyValueList); -for (KeyValue kv : keyValueList) { +for (Cell kv : keyValueList) { outputKey.set(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()); -context.write(outputKey, kv); +context.write(outputKey, PhoenixKeyValueUtil.maybeCopyCell(kv)); } context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(1); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/s
phoenix git commit: PHOENIX-4318 Fix IndexHalfStoreFileReader and related classes(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 5bdd3b2d4 -> 8ab7cc142 PHOENIX-4318 Fix IndexHalfStoreFileReader and related classes(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ab7cc14 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ab7cc14 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ab7cc14 Branch: refs/heads/5.x-HBase-2.0 Commit: 8ab7cc142473e964ee124b5f9163ff341452f6b0 Parents: 5bdd3b2 Author: Rajeshbabu Chintaguntla Authored: Fri Nov 17 19:47:09 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri Nov 17 19:47:09 2017 +0530 -- .../regionserver/IndexHalfStoreFileReader.java | 23 ++- .../IndexHalfStoreFileReaderGenerator.java | 193 +-- .../hbase/regionserver/LocalIndexSplitter.java | 4 +- .../LocalIndexStoreFileScanner.java | 29 +-- .../hbase/regionserver/ScannerContextUtil.java | 7 +- .../UngroupedAggregateRegionObserver.java | 1 - .../java/org/apache/phoenix/util/IndexUtil.java | 8 +- 7 files changed, 94 insertions(+), 171 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java -- diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java index 4b6b7e2..8e590f6 100644 --- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java +++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -26,6 +27,7 @@ import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.Reference; @@ -47,7 +49,7 @@ import org.apache.phoenix.index.IndexMaintainer; * This file is not splitable. Calls to {@link #midkey()} return null. */ -public class IndexHalfStoreFileReader extends StoreFile.Reader { +public class IndexHalfStoreFileReader extends StoreFileReader { private final boolean top; // This is the key we split around. Its the first possible entry on a row: // i.e. empty column and a timestamp of LATEST_TIMESTAMP. @@ -56,7 +58,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader { private final Map indexMaintainers; private final byte[][] viewConstants; private final int offset; -private final HRegionInfo regionInfo; +private final RegionInfo regionInfo; private final byte[] regionStartKeyInHFile; /** @@ -78,9 +80,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader { final FSDataInputStreamWrapper in, long size, final Reference r, final Configuration conf, final Map indexMaintainers, -final byte[][] viewConstants, final HRegionInfo regionInfo, -byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException { -super(fs, p, in, size, cacheConf, conf); +final byte[][] viewConstants, final RegionInfo regionInfo, +byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile) throws IOException { +super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, new AtomicInteger(0), false, +conf); this.splitkey = splitKey == null ? r.getSplitKey() : splitKey; // Is it top or bottom half? 
this.top = Reference.isTopFileRegion(r.getFileRegion()); @@ -104,7 +107,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader { return indexMaintainers; } -public HRegionInfo getRegionInfo() { +public RegionInfo getRegionInfo() { return regionInfo; } @@ -123,4 +126,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader { public boolean isTop() { return top; } + +@Override +public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, +boolean isCompaction, long readPt, long scannerOrder, +boolean canOptimizeForNonNullColumn) { +ret
phoenix git commit: PHOENIX-4304 adding checks whether coprocesseros presently already before adding again(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 8e5b4131b -> 06bb5d95a PHOENIX-4304 adding checks whether coprocesseros presently already before adding again(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/06bb5d95 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/06bb5d95 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/06bb5d95 Branch: refs/heads/5.x-HBase-2.0 Commit: 06bb5d95ada56ccd86b538b29d12468a096ff376 Parents: 8e5b413 Author: Rajeshbabu Chintaguntla Authored: Tue Nov 28 09:45:46 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Nov 28 09:45:46 2017 +0530 -- .../phoenix/query/ConnectionQueryServicesImpl.java| 14 ++ 1 file changed, 10 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/06bb5d95/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 2077272..bcc5aa4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -833,13 +833,19 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement // The phoenix jar must be available on HBase classpath int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); try { +TableDescriptor newDesc = builder.build(); +if(newDesc.hasCoprocessor(ScanRegionObserver.class.getName())) { builder.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null); - +} + if(newDesc.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) { builder.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), 
null, priority, null); - +} + if(newDesc.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) { builder.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null); - +} + if(newDesc.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) { builder.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null); +} boolean isTransactional = Boolean.TRUE.equals(tableProps.get(TableProperty.TRANSACTIONAL.name())) || Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA)); // For ALTER TABLE @@ -855,7 +861,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement // For alter table, remove non transactional index coprocessor builder.removeCoprocessor(Indexer.class.getName()); } else { -if (!builder.build().hasCoprocessor(Indexer.class.getName())) { +if (!newDesc.hasCoprocessor(Indexer.class.getName())) { // If exception on alter table to transition back to non transactional builder.removeCoprocessor(PhoenixTransactionalIndexer.class.getName()); Map opts = Maps.newHashMapWithExpectedSize(1);
phoenix git commit: PHOENIX-4304 fixing the coprocessor checks(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 06bb5d95a -> a3d6c774a PHOENIX-4304 fixing the coprocessor checks(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a3d6c774 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a3d6c774 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a3d6c774 Branch: refs/heads/5.x-HBase-2.0 Commit: a3d6c774ad309cd2bd0895ee4d5cb7172a03aa20 Parents: 06bb5d9 Author: Rajeshbabu Chintaguntla Authored: Tue Nov 28 10:20:52 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Nov 28 10:20:52 2017 +0530 -- .../apache/phoenix/query/ConnectionQueryServicesImpl.java| 8 .../org/apache/phoenix/jdbc/SecureUserConnectionsTest.java | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/a3d6c774/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index bcc5aa4..98279e0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -834,16 +834,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); try { TableDescriptor newDesc = builder.build(); -if(newDesc.hasCoprocessor(ScanRegionObserver.class.getName())) { +if(!newDesc.hasCoprocessor(ScanRegionObserver.class.getName())) { builder.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null); } - if(newDesc.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) { + 
if(!newDesc.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) { builder.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, priority, null); } - if(newDesc.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) { + if(!newDesc.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) { builder.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null); } - if(newDesc.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) { + if(!newDesc.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) { builder.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null); } boolean isTransactional = http://git-wip-us.apache.org/repos/asf/phoenix/blob/a3d6c774/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java -- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java index 5a99b69..0fee08a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/SecureUserConnectionsTest.java @@ -46,6 +46,7 @@ import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.ReadOnlyProps; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; /** @@ -54,6 +55,7 @@ import org.junit.Test; * a ConcurrentHashMap. We can use a HashSet to determine when instances of ConnectionInfo * collide and when they do not. */ +@Ignore public class SecureUserConnectionsTest { private static final Log LOG = LogFactory.getLog(SecureUserConnectionsTest.class); private static final int KDC_START_ATTEMPTS = 10;
phoenix git commit: PHOENIX-4409 Fix all IT tests failing with NPE in Encoded columns(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 c75d766bc -> 1c5caff2b PHOENIX-4409 Fix all IT tests failing with NPE in Encoded columns(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c5caff2 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c5caff2 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c5caff2 Branch: refs/heads/5.x-HBase-2.0 Commit: 1c5caff2bd439a56fb065360799fb36651da0bba Parents: c75d766 Author: Rajeshbabu Chintaguntla Authored: Tue Nov 28 13:06:38 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Nov 28 13:06:38 2017 +0530 -- .../phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c5caff2/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index 602df4b..a3d219f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -307,7 +307,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver @Override public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan) throws IOException { - +super.preScannerOpen(c, scan); if (ScanUtil.isAnalyzeTable(scan)) { // We are setting the start row and stop row such that it covers the entire region. As part // of Phonenix-1263 we are storing the guideposts against the physical table rather than
phoenix git commit: PHOENIX-4304 Adding coprocessor existence checks to avoid failures.(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 d3b192777 -> 30ff6d5bb PHOENIX-4304 Adding coprocessor existence checks to avoid failures.(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/30ff6d5b Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/30ff6d5b Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/30ff6d5b Branch: refs/heads/5.x-HBase-2.0 Commit: 30ff6d5bbca230ccfba9cbfeceea34b06cf9b022 Parents: d3b1927 Author: Rajeshbabu Chintaguntla Authored: Tue Nov 28 14:12:33 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Nov 28 14:12:33 2017 +0530 -- .../query/ConnectionQueryServicesImpl.java | 28 +--- 1 file changed, 24 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/30ff6d5b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index b31e112..6f84915 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -857,20 +857,27 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement && !SchemaUtil.isMetaTable(tableName) && !SchemaUtil.isStatsTable(tableName)) { if (isTransactional) { + if(!newDesc.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) { builder.addCoprocessor(PhoenixTransactionalIndexer.class.getName(), null, priority, null); +} // For alter table, remove non transactional index coprocessor +if(newDesc.hasCoprocessor(Indexer.class.getName())) { builder.removeCoprocessor(Indexer.class.getName()); +} } else { if (!newDesc.hasCoprocessor(Indexer.class.getName())) { // If exception on alter table to transition back 
to non transactional +if (newDesc.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) { builder.removeCoprocessor(PhoenixTransactionalIndexer.class.getName()); +} Map opts = Maps.newHashMapWithExpectedSize(1); opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); Indexer.enableIndexing(builder, PhoenixIndexBuilder.class, opts, priority); } } } -if (SchemaUtil.isStatsTable(tableName)) { +if ((SchemaUtil.isStatsTable(tableName) || SchemaUtil.isMetaTable(tableName)) +&& !newDesc.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) { builder.addCoprocessor(MultiRowMutationEndpoint.class.getName(), null, priority, null); } @@ -878,28 +885,40 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement Set familiesKeys = builder.build().getColumnFamilyNames(); for(byte[] family: familiesKeys) { if(Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + if(!newDesc.hasCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName())) { builder.addCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName(), -null, priority, null); +null, priority, null); break; +} } } // Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix table // stay on the same region. if (SchemaUtil.isMetaTable(tableName) || SchemaUtil.isFunctionTable(tableName)) { +if (!newDesc.hasCoprocessor(MetaDataEndpointImpl.class.getName())) { builder.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, priority, null); +} if(SchemaUtil.isMetaTable(tableName) ) { +if (!newDesc.hasCoprocessor(MetaDataRegionObserver.class.getName())) { builder.addCoprocessor(MetaDataRegionObserver.class.getName()
phoenix git commit: PHOENIX-4411 Fix all IT tests failing with ClassCastException with new cell interface(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 fd4c94d24 -> ab56b3de5 PHOENIX-4411 Fix all IT tests failing with ClassCastException with new cell interface(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ab56b3de Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ab56b3de Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ab56b3de Branch: refs/heads/5.x-HBase-2.0 Commit: ab56b3de58f55e5425584016786d1f9ba42e28b3 Parents: fd4c94d Author: Rajeshbabu Chintaguntla Authored: Thu Nov 30 10:24:57 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Nov 30 10:24:57 2017 +0530 -- .../phoenix/coprocessor/SequenceRegionObserver.java | 15 +-- .../phoenix/query/ConnectionQueryServicesImpl.java | 6 -- .../phoenix/jdbc/SecureUserConnectionsTest.java | 1 - 3 files changed, 13 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ab56b3de/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java index 3ee0973..75ab61b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java @@ -26,6 +26,7 @@ import java.util.Optional; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Append; @@ -107,8 +108,9 @@ public class SequenceRegionObserver implements RegionObserver, RegionCoprocessor * @since 3.0.0 */ @Override -public Result preIncrement(final ObserverContext e, -final Increment increment) throws 
IOException { +public Result preIncrement( + org.apache.hadoop.hbase.coprocessor.ObserverContext e, +Increment increment) throws IOException { RegionCoprocessorEnvironment env = e.getEnvironment(); // We need to set this to prevent region.increment from being called e.bypass(); @@ -327,8 +329,9 @@ public class SequenceRegionObserver implements RegionObserver, RegionCoprocessor */ @SuppressWarnings("deprecation") @Override -public Result preAppend(final ObserverContext e, -final Append append) throws IOException { +public Result preAppend( + org.apache.hadoop.hbase.coprocessor.ObserverContext e, +Append append) throws IOException { byte[] opBuf = append.getAttribute(OPERATION_ATTRIB); if (opBuf == null) { return null; @@ -395,7 +398,7 @@ public class SequenceRegionObserver implements RegionObserver, RegionCoprocessor Mutation m = null; switch (op) { case RETURN_SEQUENCE: -KeyValue currentValueKV = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.rawCells()[0]); +KeyValue currentValueKV = PhoenixKeyValueUtil.maybeCopyCell(result.rawCells()[0]); long expectedValue = PLong.INSTANCE.getCodec().decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault()); long value = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault()); @@ -419,7 +422,7 @@ public class SequenceRegionObserver implements RegionObserver, RegionCoprocessor if (!hadClientTimestamp) { for (List kvs : m.getFamilyCellMap().values()) { for (Cell kv : kvs) { - ((KeyValue)kv).updateLatestStamp(clientTimestampBuf); + ((ExtendedCell)kv).setTimestamp(clientTimestampBuf, 0); } } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/ab56b3de/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 
6f84915..9485887 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/
phoenix git commit: PHOENIX-4422 Connection to server is very slow.(Sergey Soldatov)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 ab56b3de5 -> 321cd47d8 PHOENIX-4422 Connection to server is very slow.(Sergey Soldatov) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/321cd47d Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/321cd47d Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/321cd47d Branch: refs/heads/5.x-HBase-2.0 Commit: 321cd47d8d47b1771e7b3844f4d23e0f47c79f7e Parents: ab56b3d Author: Rajeshbabu Chintaguntla Authored: Thu Nov 30 16:03:08 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Nov 30 16:03:08 2017 +0530 -- .../org/apache/phoenix/query/ConnectionQueryServicesImpl.java | 7 --- 1 file changed, 4 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/321cd47d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 9485887..b852316 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -1123,12 +1123,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement } newDesc.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA)); } -if (existingDesc.equals(newDesc)) { +TableDescriptor result = newDesc.build(); +if (existingDesc.equals(result)) { return null; // Indicate that no metadata was changed } -modifyTable(physicalTableName, newDesc.build(), true); -return newDesc.build(); +modifyTable(physicalTableName, result, true); +return result; } } catch (IOException e) {
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 8dc47e807 -> 4f97569fd PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f97569f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f97569f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f97569f Branch: refs/heads/5.x-HBase-2.0 Commit: 4f97569fde4fa24989e8550eea2d76f52ac462a5 Parents: 8dc47e8 Author: Rajeshbabu Chintaguntla Authored: Sat Dec 9 09:46:09 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Dec 9 09:46:09 2017 +0530 -- .../java/org/apache/phoenix/hbase/index/write/IndexWriter.java | 3 ++- .../main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java | 5 - .../org/apache/phoenix/query/ConnectionQueryServicesImpl.java | 4 +--- .../org/apache/phoenix/hbase/index/write/TestIndexWriter.java | 3 --- 4 files changed, 3 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97569f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 6b57025..4e5e182 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.exception.IndexWriteException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.PhoenixIndexFailurePolicy; import com.google.common.collect.ArrayListMultimap; import 
com.google.common.collect.Multimap; @@ -88,7 +89,7 @@ public class IndexWriter implements Stoppable { Configuration conf = env.getConfiguration(); try { IndexFailurePolicy committer = - conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, KillServerOnFailurePolicy.class, + conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97569f/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 5b76572..f36a9c5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -102,11 +102,6 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder { @Override public void setup(RegionCoprocessorEnvironment env) throws IOException { super.setup(env); -Configuration conf = env.getConfiguration(); -// Install handler that will attempt to disable the index first before killing the region -// server -conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY, -PhoenixIndexFailurePolicy.class.getName()); } @Override http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97569f/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index b852316..82100c8 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -906,9 +906,7 @@ public class 
ConnectionQueryServicesImpl extends DelegateQueryServices implement } } else if (SchemaUtil.isSequenceTable(tableName)) { if(!newDesc.hasCoprocessor(SequenceRegionObserver.class.getName())) { -// Just giving more priority to this coprocessor till HBASE-19384 gets fixed -// because in HBase 2.0 the bypass is not working as old versions. - builder.addCoprocessor(SequenceRegionObserver.class.getName
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master c075a1787 -> 334eb15b4 PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/334eb15b Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/334eb15b Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/334eb15b Branch: refs/heads/master Commit: 334eb15b4a7a80ce8d4e1c1dc09b7724663fc4da Parents: c075a17 Author: Rajeshbabu Chintaguntla Authored: Sat Dec 9 09:48:59 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Dec 9 09:48:59 2017 +0530 -- .../java/org/apache/phoenix/hbase/index/write/IndexWriter.java | 3 ++- .../main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java | 5 - 2 files changed, 2 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/334eb15b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 6b57025..4e5e182 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.exception.IndexWriteException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.PhoenixIndexFailurePolicy; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; @@ -88,7 +89,7 @@ public class IndexWriter implements Stoppable { Configuration conf = env.getConfiguration(); try { IndexFailurePolicy committer = - 
conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, KillServerOnFailurePolicy.class, + conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/334eb15b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 679c5df..8b1e2f1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -102,11 +102,6 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder { @Override public void setup(RegionCoprocessorEnvironment env) throws IOException { super.setup(env); -Configuration conf = env.getConfiguration(); -// Install handler that will attempt to disable the index first before killing the region -// server -conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY, -PhoenixIndexFailurePolicy.class.getName()); } @Override
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 2aa17ffe1 -> fadb837af PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fadb837a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fadb837a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fadb837a Branch: refs/heads/4.x-HBase-1.2 Commit: fadb837afb3846f56ed801cb1d9457e7c9f14a88 Parents: 2aa17ff Author: Rajeshbabu Chintaguntla Authored: Sat Dec 9 10:24:12 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Dec 9 10:24:12 2017 +0530 -- .../java/org/apache/phoenix/hbase/index/write/IndexWriter.java | 3 ++- .../main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java | 5 - 2 files changed, 2 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/fadb837a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 6b57025..4e5e182 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.exception.IndexWriteException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.PhoenixIndexFailurePolicy; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; @@ -88,7 +89,7 @@ public class IndexWriter implements Stoppable { Configuration conf = env.getConfiguration(); try { IndexFailurePolicy 
committer = - conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, KillServerOnFailurePolicy.class, + conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/fadb837a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 679c5df..8b1e2f1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -102,11 +102,6 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder { @Override public void setup(RegionCoprocessorEnvironment env) throws IOException { super.setup(env); -Configuration conf = env.getConfiguration(); -// Install handler that will attempt to disable the index first before killing the region -// server -conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY, -PhoenixIndexFailurePolicy.class.getName()); } @Override
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 b394788f3 -> d1679602a PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1679602 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1679602 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1679602 Branch: refs/heads/4.x-HBase-1.1 Commit: d1679602a3f4e38da51fc7ed9cf8b4fa9734b2e8 Parents: b394788 Author: Rajeshbabu Chintaguntla Authored: Sat Dec 9 10:28:19 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Dec 9 10:28:19 2017 +0530 -- .../java/org/apache/phoenix/hbase/index/write/IndexWriter.java | 3 ++- .../main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java | 5 - 2 files changed, 2 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1679602/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 6b57025..4e5e182 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.exception.IndexWriteException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.PhoenixIndexFailurePolicy; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; @@ -88,7 +89,7 @@ public class IndexWriter implements Stoppable { Configuration conf = env.getConfiguration(); try { IndexFailurePolicy 
committer = - conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, KillServerOnFailurePolicy.class, + conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1679602/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 679c5df..8b1e2f1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -102,11 +102,6 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder { @Override public void setup(RegionCoprocessorEnvironment env) throws IOException { super.setup(env); -Configuration conf = env.getConfiguration(); -// Install handler that will attempt to disable the index first before killing the region -// server -conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY, -PhoenixIndexFailurePolicy.class.getName()); } @Override
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 c14857374 -> 60a65efe8 PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/60a65efe Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/60a65efe Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/60a65efe Branch: refs/heads/4.x-HBase-0.98 Commit: 60a65efe895268c3cab69bafdcf6fd03340ebd60 Parents: c148573 Author: Rajeshbabu Chintaguntla Authored: Sat Dec 9 10:32:55 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Dec 9 10:32:55 2017 +0530 -- .../java/org/apache/phoenix/hbase/index/write/IndexWriter.java | 3 ++- .../main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java | 5 - 2 files changed, 2 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/60a65efe/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 6b57025..4e5e182 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.exception.IndexWriteException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.PhoenixIndexFailurePolicy; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; @@ -88,7 +89,7 @@ public class IndexWriter implements Stoppable { Configuration conf = env.getConfiguration(); try { 
IndexFailurePolicy committer = - conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, KillServerOnFailurePolicy.class, + conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { http://git-wip-us.apache.org/repos/asf/phoenix/blob/60a65efe/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 49053ec..da434f9 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -101,11 +101,6 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder { @Override public void setup(RegionCoprocessorEnvironment env) throws IOException { super.setup(env); -Configuration conf = env.getConfiguration(); -// Install handler that will attempt to disable the index first before killing the region -// server -conf.setIfUnset(IndexWriter.INDEX_FAILURE_POLICY_CONF_KEY, -PhoenixIndexFailurePolicy.class.getName()); } @Override
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 334eb15b4 -> 1a19d1ecb PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1a19d1ec Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1a19d1ec Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1a19d1ec Branch: refs/heads/master Commit: 1a19d1ecbd38f2b7ee406df8efa05d29f685ef57 Parents: 334eb15 Author: Rajeshbabu Chintaguntla Authored: Tue Dec 12 15:29:20 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 12 15:29:20 2017 +0530 -- .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java| 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/1a19d1ec/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index 042dd88..e2ddd4e 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -60,6 +60,7 @@ import org.apache.phoenix.hbase.index.covered.ColumnGroup; import org.apache.phoenix.hbase.index.covered.CoveredColumn; import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder; import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.ConfigUtil; import org.junit.After; import org.junit.Before; @@ -100,6 +101,7 @@ public class 
WALReplayWithIndexWritesAndCompressedWALIT { setupCluster(); Path hbaseRootDir = UTIL.getDataTestDir(); this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); +this.conf.setBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, false); this.fs = UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 4f97569fd -> 25d3c1369 PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/25d3c136 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/25d3c136 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/25d3c136 Branch: refs/heads/5.x-HBase-2.0 Commit: 25d3c13691818f7f2afdcac673a40222e228c4a0 Parents: 4f97569 Author: Rajeshbabu Chintaguntla Authored: Tue Dec 12 15:41:40 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 12 15:41:40 2017 +0530 -- .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java| 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/25d3c136/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index 2c6467a..c8accfd 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -65,6 +65,7 @@ import org.apache.phoenix.hbase.index.covered.ColumnGroup; import org.apache.phoenix.hbase.index.covered.CoveredColumn; import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder; import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.ConfigUtil; import org.junit.After; import org.junit.Before; @@ -104,6 +105,7 @@ public class 
WALReplayWithIndexWritesAndCompressedWALIT { public void setUp() throws Exception { setupCluster(); this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); +this.conf.setBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, false); this.fs = UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 fadb837af -> f93443c5d PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f93443c5 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f93443c5 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f93443c5 Branch: refs/heads/4.x-HBase-1.2 Commit: f93443c5d669edad4617fc06653f02dad680258c Parents: fadb837 Author: Rajeshbabu Chintaguntla Authored: Tue Dec 12 15:43:29 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 12 15:43:29 2017 +0530 -- .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java| 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/f93443c5/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index a7f17ec..542e640 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -60,6 +60,7 @@ import org.apache.phoenix.hbase.index.covered.ColumnGroup; import org.apache.phoenix.hbase.index.covered.CoveredColumn; import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder; import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.ConfigUtil; import org.junit.After; import org.junit.Before; @@ -100,6 +101,7 @@ public class 
WALReplayWithIndexWritesAndCompressedWALIT { setupCluster(); Path hbaseRootDir = UTIL.getDataTestDir(); this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); +this.conf.setBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, false); this.fs = UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 d1679602a -> 441539d1b PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/441539d1 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/441539d1 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/441539d1 Branch: refs/heads/4.x-HBase-1.1 Commit: 441539d1b86a35e38b56460591578892e24242d5 Parents: d167960 Author: Rajeshbabu Chintaguntla Authored: Tue Dec 12 15:44:16 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 12 15:44:16 2017 +0530 -- .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java| 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/441539d1/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index 5c29f7c..dd885b0 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -58,6 +58,7 @@ import org.apache.phoenix.hbase.index.covered.ColumnGroup; import org.apache.phoenix.hbase.index.covered.CoveredColumn; import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder; import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.ConfigUtil; import org.junit.After; import org.junit.Before; @@ -98,6 +99,7 @@ public class 
WALReplayWithIndexWritesAndCompressedWALIT { setupCluster(); Path hbaseRootDir = UTIL.getDataTestDir(); this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); +this.conf.setBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, false); this.fs = UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
phoenix git commit: PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 60a65efe8 -> c4706109e PHOENIX-4446 Sequence table region opening failing because of property setting attempt on read-only configuration-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c4706109 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c4706109 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c4706109 Branch: refs/heads/4.x-HBase-0.98 Commit: c4706109e3236035b093d3dfbfb6f5de16845edc Parents: 60a65ef Author: Rajeshbabu Chintaguntla Authored: Tue Dec 12 15:46:28 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 12 15:46:28 2017 +0530 -- .../wal/WALReplayWithIndexWritesAndCompressedWALIT.java| 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/c4706109/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java index 525cc34..06b7f6c 100644 --- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java +++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java @@ -55,6 +55,7 @@ import org.apache.phoenix.hbase.index.covered.ColumnGroup; import org.apache.phoenix.hbase.index.covered.CoveredColumn; import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder; import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.ConfigUtil; import org.junit.After; import org.junit.Before; @@ -94,6 +95,7 @@ public class 
WALReplayWithIndexWritesAndCompressedWALIT { public void setUp() throws Exception { setupCluster(); this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); +this.conf.setBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, false); this.fs = UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
phoenix git commit: PHOENIX-4465 Default values of some of the table/column properties like max versions changed in HBase 2.0(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 1fafebf5d -> a324f6da6 PHOENIX-4465 Default values of some of the table/column properties like max versions changed in HBase 2.0(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a324f6da Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a324f6da Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a324f6da Branch: refs/heads/5.x-HBase-2.0 Commit: a324f6da680226158807c1dc7e94f94d9addad84 Parents: 1fafebf Author: Rajeshbabu Chintaguntla Authored: Thu Dec 21 18:18:46 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Dec 21 18:18:46 2017 +0530 -- .../org/apache/phoenix/tx/TransactionIT.java| 10 +- .../coprocessor/DelegateRegionObserver.java | 3 +- .../coprocessor/MetaDataEndpointImpl.java | 97 .../PhoenixTransactionalProcessor.java | 8 ++ .../query/ConnectionQueryServicesImpl.java | 22 +++-- 5 files changed, 67 insertions(+), 73 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/a324f6da/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java index dcbc83e..e743bfd 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java @@ -172,7 +172,7 @@ public class TransactionIT extends ParallelStatsDisabledIT { assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions()); assertEquals(1000, colDesc.getTimeToLive()); byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); -assertEquals(1000, Bytes.toInt(propertyTTL)); +assertEquals(1000, Integer.parseInt(Bytes.toString(propertyTTL))); } desc = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX1")); @@ -180,7 +180,7 @@ public class TransactionIT extends ParallelStatsDisabledIT { assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions()); assertEquals(1000, colDesc.getTimeToLive()); byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); -assertEquals(1000, Bytes.toInt(propertyTTL)); +assertEquals(1000, Integer.parseInt(Bytes.toString(propertyTTL))); } desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX2")); @@ -188,7 +188,7 @@ public class TransactionIT extends ParallelStatsDisabledIT { assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions()); assertEquals(1000, colDesc.getTimeToLive()); byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); -assertEquals(1000, Bytes.toInt(propertyTTL)); +assertEquals(1000, Integer.parseInt(Bytes.toString(propertyTTL))); } conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "2(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)"); @@ -205,7 +205,7 @@ public class TransactionIT extends ParallelStatsDisabledIT { assertEquals(10, colDesc.getMaxVersions()); assertEquals(1000, colDesc.getTimeToLive()); byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); -assertEquals(1000, Bytes.toInt(propertyTTL)); +assertEquals(1000, Integer.parseInt(Bytes.toString(propertyTTL))); } conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "3(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)"); @@ -236,7 +236,7 @@ public class TransactionIT extends ParallelStatsDisabledIT { assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions()); assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, colDesc.getTimeToLive()); byte[] propertyTTL = 
colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES); -assertEquals(1000, Bytes.toInt(propertyTTL)); +assertEquals(1000, Integer.parseInt(Bytes.toString(propertyTTL))); } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/a324f6da/phoenix-core/src/main/
phoenix git commit: PHOENIX-4481 Some IT tests are failing with wrong bytes count after updating statistics(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 0fcd9de8b -> 91bfaf0e7 PHOENIX-4481 Some IT tests are failing with wrong bytes count after updating statistics(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/91bfaf0e Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/91bfaf0e Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/91bfaf0e Branch: refs/heads/5.x-HBase-2.0 Commit: 91bfaf0e7f2ce67d30d54ef162771af4c839fc26 Parents: 0fcd9de Author: Rajeshbabu Chintaguntla Authored: Fri Dec 22 20:01:08 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri Dec 22 20:01:08 2017 +0530 -- .../apache/phoenix/schema/stats/DefaultStatisticsCollector.java| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/91bfaf0e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java index 42f8b91..853c24d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java @@ -334,7 +334,7 @@ class DefaultStatisticsCollector implements StatisticsCollector { incrementRow = false; } } -int kvLength = CellUtil.estimatedSerializedSizeOf(cell); +int kvLength = KeyValueUtil.getSerializedSize(cell, true); long byteCount = gps.getFirst() + kvLength; gps.setFirst(byteCount); if (byteCount >= guidePostDepth) {
phoenix git commit: PHOENIX-4495 Fix possible NPE/ClassCastException in Stats IT tests(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 2af8aaf6b -> b4ccdd2c9 PHOENIX-4495 Fix possible NPE/ClassCastException in Stats IT tests(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b4ccdd2c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b4ccdd2c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b4ccdd2c Branch: refs/heads/5.x-HBase-2.0 Commit: b4ccdd2c97494b3ce40f2594e48baecd0e72bf31 Parents: 2af8aaf Author: Rajeshbabu Chintaguntla Authored: Tue Dec 26 12:15:58 2017 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Dec 26 12:15:58 2017 +0530 -- .../java/org/apache/phoenix/schema/stats/StatsCollectorIT.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b4ccdd2c/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java index 5aa97ab..f7a2edd 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java @@ -824,9 +824,8 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT { private RegionCoprocessorEnvironment getRegionEnvrionment(String tableName) throws IOException, InterruptedException { -return getUtility() -.getRSForFirstRegionInTable(TableName.valueOf(tableName)) - .getOnlineRegionsLocalContext().iterator().next().getCoprocessorHost() +return getUtility().getMiniHBaseCluster().getRegions(TableName.valueOf(tableName)).get(0) +.getCoprocessorHost() .findCoprocessorEnvironment(UngroupedAggregateRegionObserver.class.getName()); } }
phoenix git commit: PHOENIX-4547 Use hbase 2.0.0-beta-1(Josh Elser)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 42f4fed7e -> fd4ef4fb3 PHOENIX-4547 Use hbase 2.0.0-beta-1(Josh Elser) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fd4ef4fb Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fd4ef4fb Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fd4ef4fb Branch: refs/heads/5.x-HBase-2.0 Commit: fd4ef4fb3f7d6fad2c47d51399d3d9002696fdd9 Parents: 42f4fed Author: Rajeshbabu Chintaguntla Authored: Mon Jan 22 12:28:07 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Jan 22 12:28:07 2018 +0530 -- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd4ef4fb/pom.xml -- diff --git a/pom.xml b/pom.xml index 0dba0c7..d9f6e7b 100644 --- a/pom.xml +++ b/pom.xml @@ -66,7 +66,7 @@ ${project.basedir} -2.0.0-beta-1-SNAPSHOT +2.0.0-beta-1 3.0.0-beta1
phoenix git commit: PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 195f82bae -> 4b76b210a PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4b76b210 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4b76b210 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4b76b210 Branch: refs/heads/master Commit: 4b76b210acb23e6402f5461df36eeb612ee5de5c Parents: 195f82b Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 00:11:02 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 00:11:02 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 29 .../apache/phoenix/compile/DeleteCompiler.java | 17 2 files changed, 40 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b76b210/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 9eac0af..e111e7a 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -20,6 +20,7 @@ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -734,6 +735,34 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes() +throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String indexName2 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 
VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +String idx2 = "CREATE INDEX " + indexName2 + " ON " + tableName + "(v1, v2)"; +try (Connection conn = DriverManager.getConnection(getUrl())) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +conn.createStatement().execute(idx2); +Statement stmt = conn.createStatement(); +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES (1,'value', 'value2')"); +conn.commit(); +conn.setAutoCommit(false); +try { +conn.createStatement().execute("DELETE FROM " + tableName + " WHERE pk1 > 0"); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b76b210/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 7a880e9..fd80238 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -25,6 +25,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -466,7 +467,7 @@ public class DeleteCompiler { for (PTable index : immutableIndexes) { selectColumnCount += index.getPKColumns().size() - pkColumnCount; } -List projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset); +Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); for (int i = isSalted ? 
1 : 0; i < pkColumnOffset; i++) { PColumn column = table.getPKColumns().get(i); @@ -487,8 +488,10 @@ public class DeleteCompiler { String columnName = columnInfo.getSecond(); boolean h
phoenix git commit: PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 9386467c5 -> 6fb41c9b6 PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6fb41c9b Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6fb41c9b Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6fb41c9b Branch: refs/heads/4.x-HBase-1.3 Commit: 6fb41c9b6c79df59f8f5d68706edc4ab65fefda2 Parents: 9386467 Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 00:14:16 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 00:14:16 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 29 .../apache/phoenix/compile/DeleteCompiler.java | 17 2 files changed, 40 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/6fb41c9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 9eac0af..e111e7a 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -20,6 +20,7 @@ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -734,6 +735,34 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes() +throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String indexName2 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT 
NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +String idx2 = "CREATE INDEX " + indexName2 + " ON " + tableName + "(v1, v2)"; +try (Connection conn = DriverManager.getConnection(getUrl())) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +conn.createStatement().execute(idx2); +Statement stmt = conn.createStatement(); +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES (1,'value', 'value2')"); +conn.commit(); +conn.setAutoCommit(false); +try { +conn.createStatement().execute("DELETE FROM " + tableName + " WHERE pk1 > 0"); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/6fb41c9b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 7a880e9..fd80238 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -25,6 +25,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -466,7 +467,7 @@ public class DeleteCompiler { for (PTable index : immutableIndexes) { selectColumnCount += index.getPKColumns().size() - pkColumnCount; } -List projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset); +Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); for (int i = isSalted ? 
1 : 0; i < pkColumnOffset; i++) { PColumn column = table.getPKColumns().get(i); @@ -487,8 +488,10 @@ public class DeleteCompiler { String columnName = columnInfo.getSecond();
phoenix git commit: PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 b6d848983 -> 24547536b PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/24547536 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/24547536 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/24547536 Branch: refs/heads/4.x-HBase-1.2 Commit: 24547536b1b0f1612c9429e1b29b63af11d7fd2f Parents: b6d8489 Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 00:15:01 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 00:15:01 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 29 .../apache/phoenix/compile/DeleteCompiler.java | 17 2 files changed, 40 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/24547536/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 9eac0af..e111e7a 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -20,6 +20,7 @@ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -734,6 +735,34 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes() +throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String indexName2 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT 
NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +String idx2 = "CREATE INDEX " + indexName2 + " ON " + tableName + "(v1, v2)"; +try (Connection conn = DriverManager.getConnection(getUrl())) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +conn.createStatement().execute(idx2); +Statement stmt = conn.createStatement(); +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES (1,'value', 'value2')"); +conn.commit(); +conn.setAutoCommit(false); +try { +conn.createStatement().execute("DELETE FROM " + tableName + " WHERE pk1 > 0"); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/24547536/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 7a880e9..fd80238 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -25,6 +25,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -466,7 +467,7 @@ public class DeleteCompiler { for (PTable index : immutableIndexes) { selectColumnCount += index.getPKColumns().size() - pkColumnCount; } -List projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset); +Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); for (int i = isSalted ? 
1 : 0; i < pkColumnOffset; i++) { PColumn column = table.getPKColumns().get(i); @@ -487,8 +488,10 @@ public class DeleteCompiler { String columnName = columnInfo.getSecond();
phoenix git commit: PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 eaf6d4c0c -> 47e8262d4 PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/47e8262d Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/47e8262d Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/47e8262d Branch: refs/heads/4.x-HBase-0.98 Commit: 47e8262d41ecb56f4a52cd4f1aca82741a840f5c Parents: eaf6d4c Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 00:17:56 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 00:17:56 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 29 .../apache/phoenix/compile/DeleteCompiler.java | 17 2 files changed, 40 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e8262d/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 9eac0af..e111e7a 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -20,6 +20,7 @@ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -734,6 +735,34 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes() +throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String indexName2 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT 
NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +String idx2 = "CREATE INDEX " + indexName2 + " ON " + tableName + "(v1, v2)"; +try (Connection conn = DriverManager.getConnection(getUrl())) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +conn.createStatement().execute(idx2); +Statement stmt = conn.createStatement(); +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES (1,'value', 'value2')"); +conn.commit(); +conn.setAutoCommit(false); +try { +conn.createStatement().execute("DELETE FROM " + tableName + " WHERE pk1 > 0"); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e8262d/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 7a880e9..fd80238 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -25,6 +25,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -466,7 +467,7 @@ public class DeleteCompiler { for (PTable index : immutableIndexes) { selectColumnCount += index.getPKColumns().size() - pkColumnCount; } -List projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset); +Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); for (int i = isSalted ? 
1 : 0; i < pkColumnOffset; i++) { PColumn column = table.getPKColumns().get(i); @@ -487,8 +488,10 @@ public class DeleteCompiler { String columnName = columnInfo.getSecond();
phoenix git commit: PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 fd4ef4fb3 -> 844cb123b PHOENIX-4551 Possible ColumnAlreadyExistsException is thrown from delete when autocommit off(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/844cb123 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/844cb123 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/844cb123 Branch: refs/heads/5.x-HBase-2.0 Commit: 844cb123b68b625e704462addd2e2c104fa332df Parents: fd4ef4f Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 00:26:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 00:26:43 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 29 .../apache/phoenix/compile/DeleteCompiler.java | 17 2 files changed, 40 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/844cb123/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 9eac0af..e111e7a 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -20,6 +20,7 @@ package org.apache.phoenix.end2end; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -734,6 +735,34 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes() +throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String indexName2 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT 
NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +String idx2 = "CREATE INDEX " + indexName2 + " ON " + tableName + "(v1, v2)"; +try (Connection conn = DriverManager.getConnection(getUrl())) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +conn.createStatement().execute(idx2); +Statement stmt = conn.createStatement(); +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES (1,'value', 'value2')"); +conn.commit(); +conn.setAutoCommit(false); +try { +conn.createStatement().execute("DELETE FROM " + tableName + " WHERE pk1 > 0"); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/844cb123/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index f038cda..ff3d501 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -25,6 +25,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -442,7 +443,7 @@ public class DeleteCompiler { for (PTable index : immutableIndexes) { selectColumnCount += index.getPKColumns().size() - pkColumnCount; } -List projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset); +Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); for (int i = isSalted ? 
1 : 0; i < pkColumnOffset; i++) { PColumn column = table.getPKColumns().get(i); @@ -463,8 +464,10 @@ public class DeleteCompiler { String columnName = columnInfo.
phoenix git commit: PHOENIX-4534 upsert/delete/upsert for the same row corrupts the indexes(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 668c36ca6 -> d1b5fd528 PHOENIX-4534 upsert/delete/upsert for the same row corrupts the indexes(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1b5fd52 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1b5fd52 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1b5fd52 Branch: refs/heads/5.x-HBase-2.0 Commit: d1b5fd52885aac904016b38aa5d1ece43b0053ab Parents: 668c36c Author: Rajeshbabu Chintaguntla Authored: Wed Jan 24 15:40:12 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Jan 24 15:40:12 2018 +0530 -- .../phoenix/end2end/index/MutableIndexIT.java | 41 .../filter/ApplyAndFilterDeletesFilter.java | 8 .../index/scanner/FilteredKeyValueScanner.java | 7 +++- 3 files changed, 47 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1b5fd52/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java index a931084..0fbb933 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java @@ -728,6 +728,47 @@ public class MutableIndexIT extends ParallelStatsDisabledIT { } } + + @Test + public void testUpsertingDeletedRowShouldGiveProperDataWithIndexes() throws Exception { + testUpsertingDeletedRowShouldGiveProperDataWithIndexes(false); + } + + @Test + public void testUpsertingDeletedRowShouldGiveProperDataWithMultiCFIndexes() throws Exception { + testUpsertingDeletedRowShouldGiveProperDataWithIndexes(true); + } + + private void testUpsertingDeletedRowShouldGiveProperDataWithIndexes(boolean multiCf) throws Exception { + String tableName = "TBL_" + generateUniqueName(); + 
String indexName = "IDX_" + generateUniqueName(); + String columnFamily1 = "cf1"; + String columnFamily2 = "cf2"; + String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName); + try (Connection conn = getConnection()) { +conn.createStatement().execute( +"create table " + fullTableName + " (id integer primary key, " ++ (multiCf ? columnFamily1 : "") + "f float, " ++ (multiCf ? columnFamily2 : "") + "s varchar)" + tableDDLOptions); +conn.createStatement().execute( +"create index " + indexName + " on " + fullTableName + " (" ++ (multiCf ? columnFamily1 : "") + "f) include ("+(multiCf ? columnFamily2 : "") +"s)"); +conn.createStatement().execute( +"upsert into " + fullTableName + " values (1, 0.5, 'foo')"); + conn.commit(); + conn.createStatement().execute("delete from " + fullTableName + " where id = 1"); + conn.commit(); +conn.createStatement().execute( +"upsert into " + fullTableName + " values (1, 0.5, 'foo')"); + conn.commit(); + ResultSet rs = conn.createStatement().executeQuery("select * from "+indexName); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(2)); + assertEquals(0.5F, rs.getFloat(1), 0.0); + assertEquals("foo", rs.getString(3)); + } + } + private void upsertRow(String dml, Connection tenantConn, int i) throws SQLException { PreparedStatement stmt = tenantConn.prepareStatement(dml); stmt.setString(1, "00" + String.valueOf(i)); http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1b5fd52/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java index 67049f5..0fa9487 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered
phoenix git commit: PHOENIX-4534 upsert/delete/upsert for the same row corrupts the indexes-addendum(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 ab796a8f5 -> b5d0bd789 PHOENIX-4534 upsert/delete/upsert for the same row corrupts the indexes-addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b5d0bd78 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b5d0bd78 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b5d0bd78 Branch: refs/heads/5.x-HBase-2.0 Commit: b5d0bd78994f3c7c585cfccc9b2537a6a088a783 Parents: ab796a8 Author: Rajeshbabu Chintaguntla Authored: Thu Jan 25 17:58:40 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Jan 25 17:58:40 2018 +0530 -- .../hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5d0bd78/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java index 0fa9487..2a606c5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java @@ -94,7 +94,6 @@ public class ApplyAndFilterDeletesFilter extends FilterBase { @Override public void reset(){ this.coveringDelete.reset(); -this.done = false; }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master f5512105c -> a6bf7350d PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a6bf7350 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a6bf7350 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a6bf7350 Branch: refs/heads/master Commit: a6bf7350d311e42402d330b07cb412b422eeaae9 Parents: f551210 Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 13:36:18 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 13:36:18 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 36 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 39 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6bf7350/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 498aeff..5e65927 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -33,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -799,6 +803,38 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } } + +@Test +public void 
testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6bf7350/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 54e63d2..6e500c0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -256,7 +256,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { -otherMutations.clear(); +for(MultiRowMutationState multiRowMutationState: otherMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.3 d15efd1bf -> 3d67e8d5c PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3d67e8d5 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3d67e8d5 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3d67e8d5 Branch: refs/heads/4.x-HBase-1.3 Commit: 3d67e8d5c614627dab7826c1273b0e440e49f73f Parents: d15efd1 Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 13:37:21 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 13:37:21 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 36 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 39 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d67e8d5/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 498aeff..5e65927 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -33,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -799,6 +803,38 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } } + +@Test +public void 
testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d67e8d5/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 54e63d2..6e500c0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -256,7 +256,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { -otherMutations.clear(); +for(MultiRowMutationState multiRowMutationState: otherMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-0.98 1d34ad38b -> c9e23eac5 PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c9e23eac Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c9e23eac Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c9e23eac Branch: refs/heads/4.x-HBase-0.98 Commit: c9e23eac59bba3f3dc1df9079a31a31b16afd39c Parents: 1d34ad3 Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 13:44:49 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 13:44:49 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 36 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 39 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9e23eac/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 498aeff..5e65927 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -33,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -799,6 +803,38 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } } + +@Test +public void 
testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9e23eac/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 54e63d2..6e500c0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -256,7 +256,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { -otherMutations.clear(); +for(MultiRowMutationState multiRowMutationState: otherMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.2 e4566fcd7 -> ff4ad203b PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ff4ad203 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ff4ad203 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ff4ad203 Branch: refs/heads/4.x-HBase-1.2 Commit: ff4ad203b5ac932506d06aa6496a7858031003c7 Parents: e4566fc Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 13:46:42 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 13:46:42 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 36 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 39 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff4ad203/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 498aeff..5e65927 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -33,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -799,6 +803,38 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } } + +@Test +public void 
testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff4ad203/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 54e63d2..6e500c0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -256,7 +256,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { -otherMutations.clear(); +for(MultiRowMutationState multiRowMutationState: otherMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/4.x-HBase-1.1 587f78de2 -> 66fc54c68 PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/66fc54c6 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/66fc54c6 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/66fc54c6 Branch: refs/heads/4.x-HBase-1.1 Commit: 66fc54c689ce9c809d55cef91bc0b316f5cb5ca0 Parents: 587f78d Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 14:00:20 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 14:00:20 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 37 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 40 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/66fc54c6/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 6e4cb2a..c31fdd9 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,9 +17,11 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.Connection; import java.sql.Date; @@ -32,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -769,6 +774,38 @@ public class 
DeleteIT extends ParallelStatsDisabledIT { } } + +@Test +public void testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/66fc54c6/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index bc96627..46f4542 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -255,7 +255,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { -otherMutations.clear(); +for(MultiRowMutationState multiRowMutationState: otherMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 8c1746c21 -> 7cb65d155 PHOENIX-4591 Possible IndexOutOfBoundsException with delete query on bigger table(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7cb65d15 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7cb65d15 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7cb65d15 Branch: refs/heads/5.x-HBase-2.0 Commit: 7cb65d155b716aa1a4bb6179fe440b1e5f6d79e9 Parents: 8c1746c Author: Rajeshbabu Chintaguntla Authored: Mon Feb 12 14:22:38 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Mon Feb 12 14:22:38 2018 +0530 -- .../org/apache/phoenix/end2end/DeleteIT.java| 36 .../apache/phoenix/compile/DeleteCompiler.java | 4 ++- 2 files changed, 39 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/7cb65d15/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java index 498aeff..5e65927 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java @@ -17,6 +17,7 @@ */ package org.apache.phoenix.end2end; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; @@ -33,7 +34,10 @@ import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Properties; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.junit.Test; @@ -799,6 +803,38 @@ public class DeleteIT extends ParallelStatsDisabledIT { } } } + +@Test +public void 
testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception { +String tableName = generateUniqueName(); +String indexName1 = generateUniqueName(); +String ddl = +"CREATE TABLE IF NOT EXISTS " ++ tableName ++ " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))" ++ " IMMUTABLE_ROWS=true"; +String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)"; +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10)); +try (Connection conn = DriverManager.getConnection(getUrl(), props)) { +conn.createStatement().execute(ddl); +conn.createStatement().execute(idx1); +Statement stmt = conn.createStatement(); +for(int i = 0; i < 20; i++) { +stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')"); +if (i % 10 == 0) { +conn.commit(); +} +} +conn.commit(); +conn.setAutoCommit(true); +try { +conn.createStatement().execute("DELETE FROM " + tableName); +} catch (Exception e) { +fail("Should not throw any exception"); +} +} +} } http://git-wip-us.apache.org/repos/asf/phoenix/blob/7cb65d15/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index b77fcbe..53fc398 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -239,7 +239,9 @@ public class DeleteCompiler { connection.getMutationState().send(); mutations.clear(); if (indexMutations != null) { -indexMutations.clear(); +for (Map multiRowMutationState : indexMutations) { +multiRowMutationState.clear(); +} } } }
phoenix git commit: PHOENIX-4386 Calculate the estimatedSize of MutationState using Map> mutations(Thomas D'Silva)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 6cadbab92 -> aeb33b9fb PHOENIX-4386 Calculate the estimatedSize of MutationState using Map> mutations(Thomas D'Silva) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/aeb33b9f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/aeb33b9f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/aeb33b9f Branch: refs/heads/5.x-HBase-2.0 Commit: aeb33b9fbae9d19da199fed8e54d60939bdd57d8 Parents: 6cadbab Author: Rajeshbabu Chintaguntla Authored: Tue Feb 20 16:56:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Feb 20 16:56:43 2018 +0530 -- .../apache/phoenix/end2end/MutationStateIT.java | 144 + .../org/apache/phoenix/end2end/QueryMoreIT.java | 42 - .../apache/phoenix/execute/PartialCommitIT.java | 3 +- .../apache/phoenix/compile/DeleteCompiler.java | 18 +-- .../apache/phoenix/compile/UpsertCompiler.java | 11 +- .../apache/phoenix/execute/MutationState.java | 159 +-- .../java/org/apache/phoenix/util/IndexUtil.java | 4 +- .../phoenix/util/PhoenixKeyValueUtil.java | 48 ++ 8 files changed, 289 insertions(+), 140 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/aeb33b9f/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java new file mode 100644 index 000..2d5f360 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java @@ -0,0 +1,144 @@ +package org.apache.phoenix.end2end; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +import 
org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.execute.MutationState; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.query.QueryServices; +import org.junit.Test; + +public class MutationStateIT extends ParallelStatsDisabledIT { + +private static final String DDL = +" (ORGANIZATION_ID CHAR(15) NOT NULL, SCORE DOUBLE, " ++ "ENTITY_ID CHAR(15) NOT NULL, TAGS VARCHAR, CONSTRAINT PAGE_SNAPSHOT_PK " ++ "PRIMARY KEY (ORGANIZATION_ID, ENTITY_ID DESC)) MULTI_TENANT=TRUE"; + +private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException { +PreparedStatement stmt = +conn.prepareStatement("upsert into " + fullTableName ++ " (organization_id, entity_id, score) values (?,?,?)"); +for (int i = 0; i < 1; i++) { +stmt.setString(1, "" + i); +stmt.setString(2, "" + i); +stmt.setInt(3, 1); +stmt.execute(); +} +} + +@Test +public void testMaxMutationSize() throws Exception { +Properties connectionProperties = new Properties(); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3"); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "100"); +PhoenixConnection connection = +(PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties); +String fullTableName = generateUniqueName(); +try (Statement stmt = connection.createStatement()) { +stmt.execute( +"CREATE TABLE " + fullTableName + DDL); +} +try { +upsertRows(connection, fullTableName); +fail(); +} catch (SQLException e) { + assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), +e.getErrorCode()); +} + +// set the max mutation size (bytes) to a low value + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1000"); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "4"); +connection = +(PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties); +try { +upsertRows(connection, 
fullTableName); +fail(); +} catch (SQLException e) { + assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCo
phoenix git commit: PHOENIX-4531 Delete on a table with a global mutable index can issue client-side deletes against the index (Vincent Poon)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 aeb33b9fb -> df98ad3f3 PHOENIX-4531 Delete on a table with a global mutable index can issue client-side deletes against the index(Vincent Poon) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/df98ad3f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/df98ad3f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/df98ad3f Branch: refs/heads/5.x-HBase-2.0 Commit: df98ad3f3ec2343749cc6e749a673bcba928aa79 Parents: aeb33b9 Author: Rajeshbabu Chintaguntla Authored: Tue Feb 20 17:11:00 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Feb 20 17:11:00 2018 +0530 -- .../phoenix/end2end/index/BaseIndexIT.java | 20 ++ .../end2end/index/PartialIndexRebuilderIT.java | 48 ++- .../apache/phoenix/compile/DeleteCompiler.java | 65 +++- .../apache/phoenix/optimize/QueryOptimizer.java | 13 ++-- .../phoenix/compile/QueryOptimizerTest.java | 41 5 files changed, 165 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/df98ad3f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java index 3fd6b3b..c2f00e7 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java @@ -37,6 +37,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.Iterator; +import java.util.List; import java.util.Properties; import java.util.Random; @@ -51,6 +53,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import 
org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.FromCompiler; import org.apache.phoenix.end2end.ParallelStatsDisabledIT; @@ -68,6 +72,7 @@ import org.apache.phoenix.schema.PTableKey; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ReadOnlyProps; @@ -202,6 +207,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT { String dml = "DELETE from " + fullTableName + " WHERE long_col2 = 4"; assertEquals(1,conn.createStatement().executeUpdate(dml)); +assertNoClientSideIndexMutations(conn); conn.commit(); String query = "SELECT /*+ NO_INDEX */ long_pk FROM " + fullTableName; @@ -232,6 +238,19 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT { } } +private void assertNoClientSideIndexMutations(Connection conn) throws SQLException { +if (mutable) { +Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); +if (iterator.hasNext()) { +byte[] tableName = iterator.next().getFirst(); // skip data table mutations +PTable table = PhoenixRuntime.getTable(conn, Bytes.toString(tableName)); +assertTrue(table.getType() == PTableType.TABLE); // should be data table +boolean hasIndexData = iterator.hasNext(); +assertFalse(hasIndexData); // should have no index data +} +} +} + @Test public void testCreateIndexAfterUpsertStarted() throws Exception { testCreateIndexAfterUpsertStarted(false, @@ -367,6 +386,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT { String dml = "DELETE from " + fullTableName + " WHERE long_col2 = 4"; assertEquals(1,conn.createStatement().executeUpdate(dml)); 
+assertNoClientSideIndexMutations(conn); conn.commit(); // query the data table http://git-wip-us.apache.org/repos/asf/phoenix/blob/df98ad3f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end
phoenix git commit: PHOENIX-4531 Delete on a table with a global mutable index can issue client-side deletes against the index - addendum (Vincent Poon)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 d931f8c79 -> 63940f949 PHOENIX-4531 Delete on a table with a global mutable index can issue client-side deletes against the index-addendum(Vincent Poon) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/63940f94 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/63940f94 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/63940f94 Branch: refs/heads/5.x-HBase-2.0 Commit: 63940f9491f4159076239bdbcffa2aa9db0b0b7a Parents: d931f8c Author: Rajeshbabu Chintaguntla Authored: Wed Feb 21 19:15:08 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Wed Feb 21 19:15:08 2018 +0530 -- .../main/java/org/apache/phoenix/compile/DeleteCompiler.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/63940f94/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index 6224570..a4d9467 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -251,13 +251,13 @@ public class DeleteCompiler { MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection); connection.getMutationState().join(state); for (int i = 0; i < otherTableRefs.size(); i++) { -MutationState indexState = new MutationState(otherTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection); +MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); connection.getMutationState().join(indexState); } connection.getMutationState().send(); mutations.clear(); -if (indexMutations != null) { -for 
(MultiRowMutationState multiRowMutationState : indexMutations) { +if (otherMutations != null) { +for (MultiRowMutationState multiRowMutationState : otherMutations) { multiRowMutationState.clear(); } }
[1/2] phoenix git commit: PHOENIX-4278 Implement pure client side transactional index maintenance (Ohad Shacham)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 9ff02ea24 -> 1123f96cb http://git-wip-us.apache.org/repos/asf/phoenix/blob/1123f96c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java index 60f07b7..1ad1ce5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java @@ -33,9 +33,11 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableMapper; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.IntWritable; import org.apache.phoenix.cache.ServerCacheClient; import org.apache.phoenix.coprocessor.BaseScannerRegionObserver; +import org.apache.phoenix.coprocessor.MetaDataProtocol; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.index.PhoenixIndexCodec; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -105,6 +107,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapperhttp://git-wip-us.apache.org/repos/asf/phoenix/blob/1123f96c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 5c5a42e..e17471d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -181,7 +181,6 @@ import 
org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.hbase.index.util.VersionUtil; import org.apache.phoenix.index.PhoenixIndexBuilder; import org.apache.phoenix.index.PhoenixIndexCodec; -import org.apache.phoenix.index.PhoenixTransactionalIndexer; import org.apache.phoenix.iterate.TableResultIterator; import org.apache.phoenix.iterate.TableResultIterator.RenewLeaseStatus; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -858,19 +857,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement && !SchemaUtil.isMetaTable(tableName) && !SchemaUtil.isStatsTable(tableName)) { if (isTransactional) { - if(!newDesc.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) { - builder.addCoprocessor(PhoenixTransactionalIndexer.class.getName(), null, priority, null); -} // For alter table, remove non transactional index coprocessor if(newDesc.hasCoprocessor(Indexer.class.getName())) { builder.removeCoprocessor(Indexer.class.getName()); } } else { if (!newDesc.hasCoprocessor(Indexer.class.getName())) { -// If exception on alter table to transition back to non transactional -if (newDesc.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) { - builder.removeCoprocessor(PhoenixTransactionalIndexer.class.getName()); -} Map opts = Maps.newHashMapWithExpectedSize(1); opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); Indexer.enableIndexing(builder, PhoenixIndexBuilder.class, opts, priority); http://git-wip-us.apache.org/repos/asf/phoenix/blob/1123f96c/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java index 0b48376..633595a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -1037,27 +1037,15 @@ public 
class PTableImpl implements PTable { @Override public void delete() { newMutations(); -// we're using the Tephra column family delete marker here to prevent the translation -// of deletes to puts by the Tephra's TransactionProcessor -if (PTableImpl.this.isTransactional(
[2/2] phoenix git commit: PHOENIX-4278 Implement pure client side transactional index maintenance (Ohad Shacham)
PHOENIX-4278 Implement pure client side transactional index maintenance(Ohad Shacham) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1123f96c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1123f96c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1123f96c Branch: refs/heads/5.x-HBase-2.0 Commit: 1123f96cb03ef15e26a4aa2bd77d9a931a33e707 Parents: 9ff02ea Author: Rajeshbabu Chintaguntla Authored: Fri Feb 23 23:34:17 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri Feb 23 23:34:17 2018 +0530 -- .../phoenix/end2end/index/BaseIndexIT.java | 2 +- .../phoenix/end2end/index/ImmutableIndexIT.java | 2 + .../org/apache/phoenix/cache/HashCache.java | 1 + .../phoenix/cache/IndexMetaDataCache.java | 8 + .../apache/phoenix/cache/ServerCacheClient.java | 2 + .../org/apache/phoenix/cache/TenantCache.java | 2 +- .../apache/phoenix/cache/TenantCacheImpl.java | 4 +- .../apache/phoenix/compile/DeleteCompiler.java | 3 + .../apache/phoenix/compile/UpsertCompiler.java | 2 + .../coprocessor/MetaDataRegionObserver.java | 2 + .../coprocessor/ServerCachingEndpointImpl.java | 4 +- .../coprocessor/ServerCachingProtocol.java | 2 +- .../UngroupedAggregateRegionObserver.java | 23 +- .../generated/ServerCachingProtos.java | 117 - .../apache/phoenix/execute/MutationState.java | 30 +- .../PhoenixTxnIndexMutationGenerator.java | 519 +++ .../hbase/index/covered/LocalTableState.java| 6 - .../phoenix/hbase/index/covered/TableState.java | 5 - .../index/IndexMetaDataCacheFactory.java| 7 +- .../apache/phoenix/index/PhoenixIndexCodec.java | 3 +- .../phoenix/index/PhoenixIndexMetaData.java | 12 + .../index/PhoenixTransactionalIndexer.java | 8 +- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 1 + .../apache/phoenix/join/HashCacheFactory.java | 34 +- .../index/PhoenixIndexPartialBuildMapper.java | 5 + .../query/ConnectionQueryServicesImpl.java | 8 - .../org/apache/phoenix/schema/PTableImpl.java | 22 +- 
.../apache/phoenix/cache/TenantCacheTest.java | 14 +- .../src/main/ServerCachingService.proto | 1 + 29 files changed, 757 insertions(+), 92 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/1123f96c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java index c2f00e7..48268dd 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java @@ -246,7 +246,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT { PTable table = PhoenixRuntime.getTable(conn, Bytes.toString(tableName)); assertTrue(table.getType() == PTableType.TABLE); // should be data table boolean hasIndexData = iterator.hasNext(); -assertFalse(hasIndexData); // should have no index data +assertFalse(hasIndexData && !transactional); // should have no index data } } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/1123f96c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java index c83ca4a..85a26f9 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java @@ -72,6 +72,7 @@ import com.google.common.collect.Maps; public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT { private final boolean localIndex; +private final boolean transactional; private final String tableDDLOptions; private volatile boolean stopThreads = false; @@ -83,6 +84,7 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT { public ImmutableIndexIT(boolean localIndex, 
boolean transactional, boolean columnEncoded) { StringBuilder optionBuilder = new StringBuilder("IMMUTABLE_ROWS=true"); this.localIndex = localIndex; +this.transactional = transactional; if (!columnEncoded) { if (optionBuilder.length()!=0) optionBuilder.append(","); http://git
phoenix git commit: PHOENIX-4487 Missing SYSTEM.MUTEX table upgrading from 4.7 to 4.13 (James Taylor)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 1123f96cb -> 1363d5fa6 PHOENIX-4487 Missing SYSTEM.MUTEX table upgrading from 4.7 to 4.13(James Taylor) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1363d5fa Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1363d5fa Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1363d5fa Branch: refs/heads/5.x-HBase-2.0 Commit: 1363d5fa6aacdd9f4b5d37395db0fa42593f45aa Parents: 1123f96 Author: Rajeshbabu Chintaguntla Authored: Fri Feb 23 23:40:26 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Fri Feb 23 23:40:26 2018 +0530 -- .../query/ConnectionQueryServicesImpl.java | 29 1 file changed, 24 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/1363d5fa/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index e17471d..981baab 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -2643,11 +2643,23 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement } catch (TableAlreadyExistsException e) { long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); sysCatalogTableName = e.getTable().getPhysicalName().getString(); -if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP -&& (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp, mutexRowKey))) { -snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp); -createSnapshot(snapshotName, sysCatalogTableName); -snapshotCreated = true; +if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) { +// 
Ensure that the SYSTEM.MUTEX table has been created prior +// to attempting to acquire the upgrade mutex. If namespace +// mapping is enabled, we've already done this earlier in the +// upgrade, so no need for a bunch of wasted RPCs. +if (currentServerSideTableTimeStamp <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 && + !SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, + ConnectionQueryServicesImpl.this.getProps())) { +try (Admin admin = getAdmin()) { +createSysMutexTable(admin, this.getProps()); +} +} +if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp, mutexRowKey)) { +snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp); +createSnapshot(snapshotName, sysCatalogTableName); +snapshotCreated = true; +} } String columnsToAdd = ""; // This will occur if we have an older SYSTEM.CATALOG and we need to update it to @@ -3196,6 +3208,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement // Try acquiring a lock in SYSMUTEX table before migrating the tables since it involves disabling the table // If we cannot acquire lock, it means some old client is either migrating SYSCAT or trying to upgrade the // schema of SYSCAT table and hence it should not be interrupted +// Create mutex if not already created +if (!tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME)) { +TableName mutexName = SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props); +if (PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME.equals(mutexName) || !tableNames.contains(mutexName)) { +createSysMutexTable(admin, props); +} +} acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey); if(acquiredMutexLock) { logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace");
phoenix git commit: PHOENIX-4488 Cache config parameters for MetaDataEndPointImpl during initialization (James Taylor)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 1363d5fa6 -> 6c9855139 PHOENIX-4488 Cache config parameters for MetaDataEndPointImpl during initialization(James Taylor) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c985513 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c985513 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c985513 Branch: refs/heads/5.x-HBase-2.0 Commit: 6c9855139a954c5d9e7db6b298d1f59458a83ef8 Parents: 1363d5f Author: Rajeshbabu Chintaguntla Authored: Sat Feb 24 00:03:43 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Sat Feb 24 00:03:43 2018 +0530 -- .../coprocessor/MetaDataEndpointImplTest.java | 44 .../coprocessor/MetaDataEndpointImpl.java | 26 ++-- 2 files changed, 13 insertions(+), 57 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c985513/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java deleted file mode 100644 index 2c558d8..000 --- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.coprocessor; - -import com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTableType; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class MetaDataEndpointImplTest { - -@Test -public void testExceededIndexQuota() throws Exception { -PTable parentTable = mock(PTable.class); -List indexes = Lists.newArrayList(mock(PTable.class), mock(PTable.class)); -when(parentTable.getIndexes()).thenReturn(indexes); -Configuration configuration = new Configuration(); -assertFalse(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration)); -configuration.setInt(QueryServices.MAX_INDEXES_PER_TABLE, 1); -assertTrue(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration)); -} -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c985513/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 019777e..751aea0 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -236,7 +236,6 @@ import org.apache.phoenix.util.UpgradeUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.Cache; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -462,6 +461,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr } private RegionCoprocessorEnvironment env; +private boolean blockWriteRebuildIndex; +private int maxIndexesPerTable; +private boolean isTablesMappingEnabled; /** * Stores a reference to the coprocessor environment provided by the @@ -480,6 +482,13 @@ public class MetaDataEndpointImpl e
phoenix git commit: PHOENIX-4360 Prevent SYSTEM.CATALOG from splitting (Lars Hofhansl)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 6dcc24fa4 -> ba518fb05 PHOENIX-4360 Prevent System.Catalog from splitting(Lars Hofhansl) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ba518fb0 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ba518fb0 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ba518fb0 Branch: refs/heads/5.x-HBase-2.0 Commit: ba518fb05fcdd3c7c3253c010aad29d6231327aa Parents: 6dcc24f Author: Rajeshbabu Chintaguntla Authored: Thu Mar 1 14:41:24 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Mar 1 14:41:24 2018 +0530 -- .../apache/phoenix/end2end/SystemCatalogIT.java | 97 .../phoenix/schema/MetaDataSplitPolicy.java | 23 + 2 files changed, 100 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ba518fb0/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java new file mode 100644 index 000..9c66b9a --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.end2end; + +import static org.junit.Assert.assertEquals; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.phoenix.util.PhoenixRuntime; +import org.junit.After; +import org.junit.Ignore; +import org.junit.Test; + +@Ignore +public class SystemCatalogIT { +private HBaseTestingUtility testUtil = null; + +@After +public void cleanup() throws Exception { +if (null != testUtil) { + testUtil.shutdownMiniCluster(); + testUtil = null; +} +} + +/** + * Make sure that SYSTEM.CATALOG cannot be split, even with schemas and multi-tenant views + */ +@Test +public void testSystemTableSplit() throws Exception { +testUtil = new HBaseTestingUtility(); +testUtil.startMiniCluster(1); +for (int i=0; i<10; i++) { +createTable("schema"+i+".table_"+i); +} +TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG"); +RegionLocator rl = testUtil.getConnection().getRegionLocator(systemCatalog); +assertEquals(rl.getAllRegionLocations().size(), 1); + +// now attempt to split SYSTEM.CATALOG +testUtil.getAdmin().split(systemCatalog); + +// make sure the split finishes (there's no synchronous splitting before HBase 2.x) +testUtil.getAdmin().disableTable(systemCatalog); +testUtil.getAdmin().enableTable(systemCatalog); + +// test again... Must still be exactly one region. 
+rl = testUtil.getConnection().getRegionLocator(systemCatalog); +assertEquals(1, rl.getAllRegionLocations().size()); +} + +private void createTable(String tableName) throws Exception { +try (Connection conn = DriverManager.getConnection(getJdbcUrl()); +Statement stmt = conn.createStatement();) { +stmt.execute("DROP TABLE IF EXISTS " + tableName); +stmt.execute("CREATE TABLE " + tableName ++ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true"); +try (Connection tenant1Conn = getTenantConnection("tenant1")) { +String view1DDL = "CREATE VIEW " + tableName + "_view AS SELECT * FROM &quo
phoenix git commit: PHOENIX-4523 phoenix.schema.isNamespaceMappingEnabled problem (Karan Mehta)
Repository: phoenix Updated Branches: refs/heads/5.x-HBase-2.0 ba518fb05 -> ba1fd85dc PHOENIX-4523 phoenix.schema.isNamespaceMappingEnabled problem(Karan Mehta) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ba1fd85d Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ba1fd85d Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ba1fd85d Branch: refs/heads/5.x-HBase-2.0 Commit: ba1fd85dc80c36f124c6d085fd5dc9a51e91dfd1 Parents: ba518fb Author: Rajeshbabu Chintaguntla Authored: Thu Mar 1 18:45:31 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Thu Mar 1 18:45:31 2018 +0530 -- .../query/ConnectionQueryServicesImpl.java | 36 +++- .../org/apache/phoenix/util/UpgradeUtil.java| 2 ++ .../query/ConnectionQueryServicesImplTest.java | 6 ++-- 3 files changed, 24 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ba1fd85d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 43877e2..07bef1b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -2555,15 +2555,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement } } -void createSysMutexTable(Admin admin, ReadOnlyProps props) throws IOException, SQLException { +void createSysMutexTableIfNotExists(Admin admin, ReadOnlyProps props) throws IOException, SQLException { try { -final TableName mutexTableName = SchemaUtil.getPhysicalTableName( -PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props); -List systemTables = getSystemTableNames(admin); -if (systemTables.contains(mutexTableName)) { +if 
(admin.tableExists(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME)) +|| admin.tableExists(TableName.valueOf( +PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME, +PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) { logger.debug("System mutex table already appears to exist, not creating it"); return; } +final TableName mutexTableName = SchemaUtil.getPhysicalTableName( +PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(mutexTableName) .addColumnFamily(ColumnFamilyDescriptorBuilder .newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES) @@ -2577,12 +2579,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement put.addColumn(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED); sysMutexTable.put(put); } -} catch (TableExistsException e) { -// Ignore +} catch (IOException e) { + if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class)) || + !Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), org.apache.hadoop.hbase.TableNotFoundException.class))) { +// Ignore +} else { +throw e; +} } } -List getSystemTableNames(Admin admin) throws IOException { +List getSystemTableNamesInDefaultNamespace(Admin admin) throws IOException { return Lists.newArrayList(admin.listTableNames(Pattern.compile(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"))); // TODO: replace to pattern } @@ -2601,7 +2608,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement // Catch the IOException to log the error message and then bubble it up for the client to retry. try { -createSysMutexTable(hbaseAdmin, ConnectionQueryServicesImpl.this.getProps()); +createSysMutexTableIfNotExists(hbaseAdmin, ConnectionQueryServicesImpl.this.getProps()); } catch (IOException exception) { logger.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. 
Please retry."); throw exception; @@ -2653,7 +2660,7 @@ public class ConnectionQueryServicesImpl extends
phoenix git commit: PHOENIX-4576 Fix LocalIndexSplitMergeIT tests failing in master branch - addendum (Rajeshbabu)
Repository: phoenix Updated Branches: refs/heads/master 204d4aa08 -> b6e33f30e PHOENIX-4576 Fix LocalIndexSplitMergeIT tests failing in master branch - addendum(Rajeshbabu) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b6e33f30 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b6e33f30 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b6e33f30 Branch: refs/heads/master Commit: b6e33f30e2084879feeb3240444806b1e7069135 Parents: 204d4aa Author: Rajeshbabu Chintaguntla Authored: Tue Mar 20 19:35:14 2018 +0530 Committer: Rajeshbabu Chintaguntla Committed: Tue Mar 20 19:35:14 2018 +0530 -- .../phoenix/end2end/LocalIndexSplitMergeIT.java | 59 .../IndexHalfStoreFileReaderGenerator.java | 8 ++- 2 files changed, 65 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6e33f30/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java index 409e98f..dc3e5d3 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java @@ -263,4 +263,63 @@ public class LocalIndexSplitMergeIT extends BaseTest { } } +@Test +public void testLocalIndexScanWithMergeSpecialCase() throws Exception { +String schemaName = generateUniqueName(); +String tableName = schemaName + "." 
+ generateUniqueName(); +String indexName = "IDX_" + generateUniqueName(); +TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), false); +createBaseTable(tableName, "('a','b','def')"); +Connection conn1 = getConnectionForLocalIndexTest(); +try { +String[] strings = +{ "aa", "aaa", "", "bb", "cc", "dd", "dff", "g", "h", "i", "j", "k", "l", +"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" }; +for (int i = 0; i < 26; i++) { +conn1.createStatement() +.execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i ++ "," + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')"); +} +conn1.commit(); +conn1.createStatement() +.execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)"); +conn1.createStatement() +.execute("CREATE LOCAL INDEX " + indexName + "_2 ON " + tableName + "(k3)"); + +HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); +List regionsOfUserTable = + MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), +admin.getConnection(), physicalTableName, false); + admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(), +regionsOfUserTable.get(1).getEncodedNameAsBytes(), false); +regionsOfUserTable = + MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), +admin.getConnection(), physicalTableName, false); + +while (regionsOfUserTable.size() != 3) { +Thread.sleep(100); +regionsOfUserTable = + MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), +admin.getConnection(), physicalTableName, false); +} +String query = "SELECT t_id,k1,v1 FROM " + tableName; +ResultSet rs = conn1.createStatement().executeQuery(query); +for (int j = 0; j < 26; j++) { +assertTrue(rs.next()); +assertEquals(strings[25-j], rs.getString("t_id")); +assertEquals(25-j, rs.getInt("k1")); +assertEquals(strings[j], rs.getString("V1")); +} +