[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-06 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f3defc4c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f3defc4c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f3defc4c

Branch: refs/heads/4.x-cdh5.14
Commit: f3defc4c3031fb8d29464e7077f5a1f67cd525c3
Parents: 376b67f
Author: Vincent Poon 
Authored: Thu Apr 5 18:03:30 2018 +0100
Committer: Pedro Boado 
Committed: Fri Apr 6 22:15:18 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/ja

[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-06 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f3defc4c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f3defc4c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f3defc4c

Branch: refs/heads/4.x-cdh5.13
Commit: f3defc4c3031fb8d29464e7077f5a1f67cd525c3
Parents: 376b67f
Author: Vincent Poon 
Authored: Thu Apr 5 18:03:30 2018 +0100
Committer: Pedro Boado 
Committed: Fri Apr 6 22:15:18 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/ja

[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-06 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f3defc4c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f3defc4c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f3defc4c

Branch: refs/heads/4.x-cdh5.12
Commit: f3defc4c3031fb8d29464e7077f5a1f67cd525c3
Parents: 376b67f
Author: Vincent Poon 
Authored: Thu Apr 5 18:03:30 2018 +0100
Committer: Pedro Boado 
Committed: Fri Apr 6 22:15:18 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3defc4c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/ja

[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-04 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ce3e5867
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ce3e5867
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ce3e5867

Branch: refs/heads/4.x-cdh5.14
Commit: ce3e5867eef9bf10038cc3729afdcfee1c27aa14
Parents: 98a8bbd
Author: Vincent Poon 
Authored: Wed Apr 4 19:06:24 2018 +0100
Committer: Pedro Boado 
Committed: Wed Apr 4 19:37:35 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 49 ++--
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 88 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ce3e5867/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..4b88b92 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,29 +41,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
+import org.apache.phoenix.util.*;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -867,6 +864,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);

[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-04 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ce3e5867
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ce3e5867
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ce3e5867

Branch: refs/heads/4.x-cdh5.13
Commit: ce3e5867eef9bf10038cc3729afdcfee1c27aa14
Parents: 98a8bbd
Author: Vincent Poon 
Authored: Wed Apr 4 19:06:24 2018 +0100
Committer: Pedro Boado 
Committed: Wed Apr 4 19:37:35 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 49 ++--
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 88 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ce3e5867/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..4b88b92 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,29 +41,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
+import org.apache.phoenix.util.*;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -867,6 +864,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);

[2/3] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-04 Thread pboado
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ce3e5867
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ce3e5867
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ce3e5867

Branch: refs/heads/4.x-cdh5.12
Commit: ce3e5867eef9bf10038cc3729afdcfee1c27aa14
Parents: 98a8bbd
Author: Vincent Poon 
Authored: Wed Apr 4 19:06:24 2018 +0100
Committer: Pedro Boado 
Committed: Wed Apr 4 19:37:35 2018 +0100

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 49 ++--
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 88 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ce3e5867/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..4b88b92 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,29 +41,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
+import org.apache.phoenix.util.*;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -867,6 +864,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstan