[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366671228
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -105,60 +129,52 @@ public void testTooLowSCNWithMaxLookbackAge() throws 
Exception {
 public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-String fullIndexName = indexStem + "1";
+createTable(dataTableName);
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
 
 Review comment:
   Done.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366671228
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -105,60 +129,52 @@ public void testTooLowSCNWithMaxLookbackAge() throws 
Exception {
 public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-String fullIndexName = indexStem + "1";
+createTable(dataTableName);
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
 
 Review comment:
   Done.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366671327
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -105,60 +129,52 @@ public void testTooLowSCNWithMaxLookbackAge() throws 
Exception {
 public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-String fullIndexName = indexStem + "1";
+createTable(dataTableName);
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
 TableName dataTable = TableName.valueOf(dataTableName);
-TableName indexTable = TableName.valueOf(fullIndexName);
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
-assertTableHasTtl(conn, indexTable, Integer.MAX_VALUE);
-long beforeDeleteSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
-Thread.sleep(1); //make sure we delete at a different ts
+populateTable(dataTableName);
+//make sure we're after the inserts have been committed
+injectEdge.incValue(1);
+long beforeDeleteSCN = EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(10); //make sure we delete at a different ts
 Statement stmt = conn.createStatement();
 stmt.execute("DELETE FROM " + dataTableName + " WHERE " + " id = 
'a'");
 Assert.assertEquals(1, stmt.getUpdateCount());
 conn.commit();
 //select stmt to get row we deleted
-String sql = String.format("SELECT * FROM %s WHERE val1 = 'ab'", 
dataTableName);
-assertExplainPlan(conn, sql, dataTableName, fullIndexName);
+String sql = String.format("SELECT * FROM %s WHERE id = 'a'", 
dataTableName);
 int rowsPlusDeleteMarker = ROWS_POPULATED;
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
 flush(dataTable);
-flush(indexTable);
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
-long beforeFirstCompactSCN = EnvironmentEdgeManager.currentTime();
-Thread.sleep(1);
-majorCompact(indexTable, beforeFirstCompactSCN);
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
+long beforeFirstCompactSCN = 
EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(1); //new ts for major compaction
+majorCompact(dataTable, beforeFirstCompactSCN);
+assertRawRowCount(conn, dataTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
 //wait for the lookback time. After this compactions should purge 
the deleted row
-Thread.sleep(MAX_LOOKBACK_AGE * 1000);
-long beforeSecondCompactSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(MAX_LOOKBACK_AGE * 1000);
+long beforeSecondCompactSCN = 
EnvironmentEdgeManager.currentTimeMillis();
 String notDeletedRowSql =
-String.format("SELECT * FROM %s WHERE val1 = 'bc'", 
dataTableName);
-assertExplainPlan(conn, notDeletedRowSql, dataTableName, 
fullIndexName);
+String.format("SELECT * FROM %s WHERE id = 'b'", 
dataTableName);
 assertRowExistsAtSCN(getUrl(), notDeletedRowSql, 
beforeSecondCompactSCN, true);
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
 assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 conn.createStatement().execute("upsert into " + dataTableName +
 " values ('c', 'cd', 'cde', 'cdef')");
 conn.commit();
-majorCompact(indexTable, beforeSecondCompactSCN);
 majorCompact(dataTable, beforeSecondCompactSCN);
 assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 //deleted row should be gone, but not deleted row should still be 
there.
 assertRowExistsAtSCN(getUrl(), sql, beforeSecondCompactSCN, false);
 assertRowExistsAtSCN(getUrl(), notDeletedRowSql, 
beforeSecondCompactSCN, true);
 //1 deleted row should be gone
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
+assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 }
 }
 
 @Test(timeout=6L)
 
 Review comment:
   And now we're 

[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r36667
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -213,51 +274,57 @@ public void testRecentMaxVersionsNotCompactedAway() 
throws Exception {
 String thirdValue = "ghi";
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem, versions);
-long afterInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+String indexName = generateUniqueName();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
 
 Review comment:
   Thanks, good catch. Fixed.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366649341
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -168,36 +184,48 @@ public void testTTLAndMaxLookbackAge() throws Exception {
 conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 0L);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-long afterFirstInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
 
 Review comment:
   That's not necessary because we always advance time by MAX_LOOKBACK_AGE 
before trying to look back that far. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366649113
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -168,36 +184,48 @@ public void testTTLAndMaxLookbackAge() throws Exception {
 conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 0L);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-long afterFirstInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
+populateTable(dataTableName);
+injectEdge.incValue(1);
+long afterFirstInsertSCN = 
EnvironmentEdgeManager.currentTimeMillis();
 TableName dataTable = TableName.valueOf(dataTableName);
 assertTableHasTtl(conn, dataTable, ttl);
-String fullIndexName = indexStem + "1";
-TableName indexTable = TableName.valueOf(fullIndexName);
-assertTableHasTtl(conn, indexTable, ttl);
-
 //first make sure we inserted correctly
-String sql = String.format("SELECT val2 FROM %s WHERE val1 = 
'ab'", dataTableName);
-assertExplainPlan(conn, sql, dataTableName, fullIndexName);
+String sql = String.format("SELECT val2 FROM %s WHERE id = 'a'", 
dataTableName);
+  //  assertExplainPlan(conn, sql, dataTableName, fullIndexName);
 assertRowExistsAtSCN(getUrl(),sql, afterFirstInsertSCN, true);
 int originalRowCount = 2;
-assertRawRowCount(conn, indexTable, originalRowCount);
+assertRawRowCount(conn, dataTable, originalRowCount);
 //force a flush
-flush(indexTable);
+flush(dataTable);
 //flush shouldn't have changed it
-assertRawRowCount(conn, indexTable, originalRowCount);
+assertRawRowCount(conn, dataTable, originalRowCount);
+  // assertExplainPlan(conn, sql, dataTableName, 
fullIndexName);
 
 Review comment:
   Not commented out anymore. :-)


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366649181
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -168,36 +184,48 @@ public void testTTLAndMaxLookbackAge() throws Exception {
 conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 0L);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-long afterFirstInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
+populateTable(dataTableName);
+injectEdge.incValue(1);
+long afterFirstInsertSCN = 
EnvironmentEdgeManager.currentTimeMillis();
 TableName dataTable = TableName.valueOf(dataTableName);
 assertTableHasTtl(conn, dataTable, ttl);
-String fullIndexName = indexStem + "1";
-TableName indexTable = TableName.valueOf(fullIndexName);
-assertTableHasTtl(conn, indexTable, ttl);
-
 //first make sure we inserted correctly
-String sql = String.format("SELECT val2 FROM %s WHERE val1 = 
'ab'", dataTableName);
-assertExplainPlan(conn, sql, dataTableName, fullIndexName);
+String sql = String.format("SELECT val2 FROM %s WHERE id = 'a'", 
dataTableName);
+  //  assertExplainPlan(conn, sql, dataTableName, fullIndexName);
 
 Review comment:
   Not commented out anymore. :-)


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366634151
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -213,62 +241,49 @@ public void testRecentMaxVersionsNotCompactedAway() 
throws Exception {
 String thirdValue = "ghi";
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem, versions);
-long afterInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
+populateTable(dataTableName);
+injectEdge.incValue(1); //increment by 1 so we can see our write
+long afterInsertSCN = EnvironmentEdgeManager.currentTimeMillis();
 //make sure table and index metadata is set up right for versions
 TableName dataTable = TableName.valueOf(dataTableName);
 assertTableHasVersions(conn, dataTable, versions);
-String fullIndexName = indexStem + "1";
-TableName indexTable = TableName.valueOf(fullIndexName);
 
 Review comment:
   All right, will do. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366627476
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -168,36 +184,48 @@ public void testTTLAndMaxLookbackAge() throws Exception {
 conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 0L);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-long afterFirstInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
 
 Review comment:
   Turns out any value >= 1 works. I'll just switch it to 1


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366626438
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -168,36 +184,48 @@ public void testTTLAndMaxLookbackAge() throws Exception {
 conf.setLong(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 0L);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-long afterFirstInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
 
 Review comment:
   WAIT_AFTER_TABLE_CREATION isn't meant to be a factor of MAX_LOOKBACK_AGE -- 
it's just an arbitrarily large number to make sure that all the metadata is 
older than the current timestamp. It could probably be lower, but since the 
size doesn't cost us anything I didn't spend too much time trying to optimize 
it. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366625069
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -213,62 +241,49 @@ public void testRecentMaxVersionsNotCompactedAway() 
throws Exception {
 String thirdValue = "ghi";
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem, versions);
-long afterInsertSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+createTable(dataTableName);
+//increment by 10 min to make sure we don't "look back" past table 
creation
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
+populateTable(dataTableName);
+injectEdge.incValue(1); //increment by 1 so we can see our write
+long afterInsertSCN = EnvironmentEdgeManager.currentTimeMillis();
 //make sure table and index metadata is set up right for versions
 TableName dataTable = TableName.valueOf(dataTableName);
 assertTableHasVersions(conn, dataTable, versions);
-String fullIndexName = indexStem + "1";
-TableName indexTable = TableName.valueOf(fullIndexName);
 
 Review comment:
   It was originally about index tables, but I eventually realized that this 
needed to apply to all tables in order for SCN to work properly, so there's 
nothing "index specific" about the functionality anymore.
   
   If the consensus is that we should have specific index cases in here to make 
sure nothing in the indexing coprocs break the general behavior, I'll put some 
(back) in. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366622629
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -105,60 +129,52 @@ public void testTooLowSCNWithMaxLookbackAge() throws 
Exception {
 public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-String fullIndexName = indexStem + "1";
+createTable(dataTableName);
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
 
 Review comment:
   The ManualEnvironmentEdge is copied from HBase code, where that's the method 
name. I couldn't use the HBase class because Phoenix's EEM is odd and requires 
a specific abstract subclass of Edge


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services


[GitHub] [phoenix] gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - BaseScannerRegionObserver should prevent compaction from purg…

2020-01-14 Thread GitBox
gjacoby126 commented on a change in pull request #679: PHOENIX-5645 - 
BaseScannerRegionObserver should prevent compaction from purg…
URL: https://github.com/apache/phoenix/pull/679#discussion_r366622118
 
 

 ##
 File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
 ##
 @@ -105,60 +129,52 @@ public void testTooLowSCNWithMaxLookbackAge() throws 
Exception {
 public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String dataTableName = generateUniqueName();
-String indexStem = generateUniqueName();
-createTableAndIndexes(conn, dataTableName, indexStem);
-String fullIndexName = indexStem + "1";
+createTable(dataTableName);
+injectEdge.incValue(WAIT_AFTER_TABLE_CREATION);
 TableName dataTable = TableName.valueOf(dataTableName);
-TableName indexTable = TableName.valueOf(fullIndexName);
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
-assertTableHasTtl(conn, indexTable, Integer.MAX_VALUE);
-long beforeDeleteSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
-Thread.sleep(1); //make sure we delete at a different ts
+populateTable(dataTableName);
+//make sure we're after the inserts have been committed
+injectEdge.incValue(1);
+long beforeDeleteSCN = EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(10); //make sure we delete at a different ts
 Statement stmt = conn.createStatement();
 stmt.execute("DELETE FROM " + dataTableName + " WHERE " + " id = 
'a'");
 Assert.assertEquals(1, stmt.getUpdateCount());
 conn.commit();
 //select stmt to get row we deleted
-String sql = String.format("SELECT * FROM %s WHERE val1 = 'ab'", 
dataTableName);
-assertExplainPlan(conn, sql, dataTableName, fullIndexName);
+String sql = String.format("SELECT * FROM %s WHERE id = 'a'", 
dataTableName);
 int rowsPlusDeleteMarker = ROWS_POPULATED;
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
 flush(dataTable);
-flush(indexTable);
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
-long beforeFirstCompactSCN = EnvironmentEdgeManager.currentTime();
-Thread.sleep(1);
-majorCompact(indexTable, beforeFirstCompactSCN);
-assertRawRowCount(conn, indexTable, rowsPlusDeleteMarker);
+long beforeFirstCompactSCN = 
EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(1); //new ts for major compaction
+majorCompact(dataTable, beforeFirstCompactSCN);
+assertRawRowCount(conn, dataTable, rowsPlusDeleteMarker);
 assertRowExistsAtSCN(getUrl(), sql, beforeDeleteSCN, true);
 //wait for the lookback time. After this compactions should purge 
the deleted row
-Thread.sleep(MAX_LOOKBACK_AGE * 1000);
-long beforeSecondCompactSCN = 
org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis();
+injectEdge.incValue(MAX_LOOKBACK_AGE * 1000);
+long beforeSecondCompactSCN = 
EnvironmentEdgeManager.currentTimeMillis();
 String notDeletedRowSql =
-String.format("SELECT * FROM %s WHERE val1 = 'bc'", 
dataTableName);
-assertExplainPlan(conn, notDeletedRowSql, dataTableName, 
fullIndexName);
+String.format("SELECT * FROM %s WHERE id = 'b'", 
dataTableName);
 assertRowExistsAtSCN(getUrl(), notDeletedRowSql, 
beforeSecondCompactSCN, true);
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
 assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 conn.createStatement().execute("upsert into " + dataTableName +
 " values ('c', 'cd', 'cde', 'cdef')");
 conn.commit();
-majorCompact(indexTable, beforeSecondCompactSCN);
 majorCompact(dataTable, beforeSecondCompactSCN);
 assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 //deleted row should be gone, but not deleted row should still be 
there.
 assertRowExistsAtSCN(getUrl(), sql, beforeSecondCompactSCN, false);
 assertRowExistsAtSCN(getUrl(), notDeletedRowSql, 
beforeSecondCompactSCN, true);
 //1 deleted row should be gone
-assertRawRowCount(conn, indexTable, ROWS_POPULATED);
+assertRawRowCount(conn, dataTable, ROWS_POPULATED);
 }
 }
 
 @Test(timeout=6L)
 
 Review comment:
   Not sure exactly