This is an automated email from the ASF dual-hosted git repository.

dlych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/asterixdb.git

commit f480e26f39d09ddc6b4a28abaa0cf18768860a23
Author: Murtadha Hubail <[email protected]>
AuthorDate: Fri Jan 22 00:52:05 2021 +0300

    [NO ISSUE][STO] Keep track of flushing memory components
    
    - user model changes: no
    - storage format changes: no
    - interface changes: no
    
    Details:
    
    - Indicate a memory component is full only if it is a component
      pending a flush.
    
    Change-Id: I121d842f823f5a615bd3833f11370334ff28ad2e
    Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/9703
    Reviewed-by: Murtadha Hubail <[email protected]>
    Reviewed-by: Ali Alsuliman <[email protected]>
    Integration-Tests: Jenkins <[email protected]>
    Tested-by: Jenkins <[email protected]>
---
 .../apache/asterix/common/context/GlobalVirtualBufferCache.java    | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/context/GlobalVirtualBufferCache.java b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/context/GlobalVirtualBufferCache.java
index a51e13c..0bb9bd5 100644
--- a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/context/GlobalVirtualBufferCache.java
+++ b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/context/GlobalVirtualBufferCache.java
@@ -73,6 +73,7 @@ public class GlobalVirtualBufferCache implements IVirtualBufferCache, ILifeCycle
     private final List<ILSMIndex> primaryIndexes = new ArrayList<>();
 
     private final Set<ILSMIndex> flushingIndexes = Collections.synchronizedSet(new HashSet<>());
+    private final Set<ILSMMemoryComponent> flushingComponents = Collections.synchronizedSet(new HashSet<>());
     private volatile int flushPtr;
 
     private final int filteredMemoryComponentMaxNumPages;
@@ -164,6 +165,7 @@ public class GlobalVirtualBufferCache implements IVirtualBufferCache, ILifeCycle
 
     @Override
     public void flushed(ILSMMemoryComponent memoryComponent) throws HyracksDataException {
+        flushingComponents.remove(memoryComponent);
         if (flushingIndexes.remove(memoryComponent.getLsmIndex())) {
             LOGGER.info("Completed flushing {}.", memoryComponent.getIndex());
             // After the flush operation is completed, we may have 2 cases:
@@ -208,8 +210,7 @@ public class GlobalVirtualBufferCache implements IVirtualBufferCache, ILifeCycle
 
     @Override
     public boolean isFull(ILSMMemoryComponent memoryComponent) {
-        return flushingIndexes.contains(memoryComponent.getLsmIndex())
-                || isFilteredMemoryComponentFull(memoryComponent);
+        return flushingComponents.contains(memoryComponent) || isFilteredMemoryComponentFull(memoryComponent);
     }
 
     private boolean isFilteredMemoryComponentFull(ILSMMemoryComponent memoryComponent) {
@@ -508,7 +509,6 @@ public class GlobalVirtualBufferCache implements IVirtualBufferCache, ILifeCycle
                                 // future writers
                                 memoryComponent.setUnwritable();
                             }
-
                             opTracker.setFlushOnExit(true);
                             opTracker.flushIfNeeded();
                             // If the flush cannot be scheduled at this time, then there must be active writers.
@@ -522,6 +522,7 @@ public class GlobalVirtualBufferCache implements IVirtualBufferCache, ILifeCycle
                         if ((flushable || opTracker.isFlushLogCreated()) && !isMetadataIndex(primaryIndex)) {
                             // global vbc cannot wait on metadata indexes because metadata indexes support full
                             // ACID transactions. Waiting on metadata indexes can introduce deadlocks.
+                            flushingComponents.add(primaryIndex.getCurrentMemoryComponent());
                             return primaryIndex;
                         }
                     }

Reply via email to