This is an automated email from the ASF dual-hosted git repository.

liujun pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/dubbo.git


The following commit(s) were added to refs/heads/3.3 by this push:
     new 49ede4fc47 bugfix: the issue of possible infinite loop when cleaning 
up expired metadata info (#15086)
49ede4fc47 is described below

commit 49ede4fc4737a40797799e9d29aa7b259f2a7a21
Author: funkye <[email protected]>
AuthorDate: Wed Feb 5 11:43:35 2025 +0800

    bugfix: the issue of possible infinite loop when cleaning up expired 
metadata info (#15086)
    
    fixes #15087
---
 .../registry/client/AbstractServiceDiscovery.java  | 64 ++++++++++++----------
 1 file changed, 36 insertions(+), 28 deletions(-)

diff --git 
a/dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/AbstractServiceDiscovery.java
 
b/dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/AbstractServiceDiscovery.java
index b05408caac..61eaf54d4e 100644
--- 
a/dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/AbstractServiceDiscovery.java
+++ 
b/dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/AbstractServiceDiscovery.java
@@ -35,13 +35,14 @@ import 
org.apache.dubbo.registry.client.metadata.ServiceInstanceMetadataUtils;
 import org.apache.dubbo.registry.client.metadata.store.MetaCacheManager;
 import org.apache.dubbo.rpc.model.ApplicationModel;
 
+import java.util.ArrayList;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
 
 import static 
org.apache.dubbo.common.constants.CommonConstants.DEFAULT_METADATA_INFO_CACHE_EXPIRE;
 import static 
org.apache.dubbo.common.constants.CommonConstants.DEFAULT_METADATA_INFO_CACHE_SIZE;
@@ -50,7 +51,6 @@ import static 
org.apache.dubbo.common.constants.CommonConstants.METADATA_INFO_CA
 import static 
org.apache.dubbo.common.constants.CommonConstants.METADATA_INFO_CACHE_SIZE_KEY;
 import static 
org.apache.dubbo.common.constants.CommonConstants.REGISTRY_LOCAL_FILE_CACHE_ENABLED;
 import static 
org.apache.dubbo.common.constants.CommonConstants.REMOTE_METADATA_STORAGE_TYPE;
-import static 
org.apache.dubbo.common.constants.LoggerCodeConstants.INTERNAL_ERROR;
 import static 
org.apache.dubbo.common.constants.LoggerCodeConstants.REGISTRY_FAILED_LOAD_METADATA;
 import static 
org.apache.dubbo.common.constants.RegistryConstants.REGISTRY_CLUSTER_KEY;
 import static org.apache.dubbo.metadata.RevisionResolver.EMPTY_REVISION;
@@ -70,7 +70,7 @@ public abstract class AbstractServiceDiscovery implements 
ServiceDiscovery {
     protected volatile ServiceInstance serviceInstance;
     protected volatile MetadataInfo metadataInfo;
     protected final ConcurrentHashMap<String, MetadataInfoStat> metadataInfos 
= new ConcurrentHashMap<>();
-    protected final ScheduledFuture<?> refreshCacheFuture;
+    protected volatile ScheduledFuture<?> refreshCacheFuture;
     protected MetadataReport metadataReport;
     protected String metadataType;
     protected final MetaCacheManager metaCacheManager;
@@ -110,36 +110,44 @@ public abstract class AbstractServiceDiscovery implements 
ServiceDiscovery {
                 registryURL.getParameter(METADATA_INFO_CACHE_EXPIRE_KEY, 
DEFAULT_METADATA_INFO_CACHE_EXPIRE);
         int metadataInfoCacheSize =
                 registryURL.getParameter(METADATA_INFO_CACHE_SIZE_KEY, 
DEFAULT_METADATA_INFO_CACHE_SIZE);
+        startRefreshCache(metadataInfoCacheExpireTime / 2, 
metadataInfoCacheSize, metadataInfoCacheExpireTime);
+    }
+
+    private void removeExpiredMetadataInfo(int metadataInfoCacheSize, int 
metadataInfoCacheExpireTime) {
+        Long nextTime = null;
+        // Cache cleanup is only required when the cache size exceeds the 
cache limit.
+        if (metadataInfos.size() > metadataInfoCacheSize) {
+            List<MetadataInfoStat> values = new 
ArrayList<>(metadataInfos.values());
+            // Place the earliest data at the front
+            
values.sort(Comparator.comparingLong(MetadataInfoStat::getUpdateTime));
+            for (MetadataInfoStat v : values) {
+                long time = System.currentTimeMillis() - v.getUpdateTime();
+                if (time > metadataInfoCacheExpireTime) {
+                    metadataInfos.remove(v.metadataInfo.getRevision(), v);
+                } else {
+                    // Calculate how long it will take for the next task to 
start
+                    nextTime = metadataInfoCacheExpireTime - time;
+                    break;
+                }
+            }
+        }
+        // If there is no metadata to clean up this time, the next task will 
start within half of the cache expiration
+        // time.
+        startRefreshCache(
+                nextTime == null ? metadataInfoCacheExpireTime / 2 : nextTime,
+                metadataInfoCacheSize,
+                metadataInfoCacheExpireTime);
+    }
+
+    private void startRefreshCache(long nextTime, int metadataInfoCacheSize, 
int metadataInfoCacheExpireTime) {
         this.refreshCacheFuture = applicationModel
                 .getFrameworkModel()
                 .getBeanFactory()
                 .getBean(FrameworkExecutorRepository.class)
                 .getSharedScheduledExecutor()
-                .scheduleAtFixedRate(
-                        () -> {
-                            try {
-                                while (metadataInfos.size() > 
metadataInfoCacheSize) {
-                                    AtomicReference<String> oldestRevision = 
new AtomicReference<>();
-                                    AtomicReference<MetadataInfoStat> 
oldestStat = new AtomicReference<>();
-                                    metadataInfos.forEach((k, v) -> {
-                                        if (System.currentTimeMillis() - 
v.getUpdateTime() > metadataInfoCacheExpireTime
-                                                && (oldestStat.get() == null
-                                                        || 
oldestStat.get().getUpdateTime() > v.getUpdateTime())) {
-                                            oldestRevision.set(k);
-                                            oldestStat.set(v);
-                                        }
-                                    });
-                                    if (oldestStat.get() != null) {
-                                        
metadataInfos.remove(oldestRevision.get(), oldestStat.get());
-                                    }
-                                }
-                            } catch (Throwable t) {
-                                logger.error(
-                                        INTERNAL_ERROR, "", "", "Error 
occurred when clean up metadata info cache.", t);
-                            }
-                        },
-                        metadataInfoCacheExpireTime / 2,
-                        metadataInfoCacheExpireTime / 2,
+                .schedule(
+                        () -> removeExpiredMetadataInfo(metadataInfoCacheSize, 
metadataInfoCacheExpireTime),
+                        nextTime,
                         TimeUnit.MILLISECONDS);
     }
 

Reply via email to the mailing list.