This is an automated email from the ASF dual-hosted git repository.

rbalamohan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 6a5c0cd  HIVE-22753: Fix gradual mem leak: Operationlog related appenders should be cleared up on errors (Rajesh Balamohan, reviewed by Ashutosh Chauhan)
6a5c0cd is described below

commit 6a5c0cd04a2e88a545a96d10a942c86b2be18daa
Author: Rajesh Balamohan <[email protected]>
AuthorDate: Tue Jan 28 08:41:35 2020 +0530

    HIVE-22753: Fix gradual mem leak: Operationlog related appenders should be cleared up on errors (Rajesh Balamohan, reviewed by Ashutosh Chauhan)
---
 .../ql/log/HushableRandomAccessFileAppender.java   | 32 ++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java
index 0ff66df..7e60435 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/HushableRandomAccessFileAppender.java
@@ -20,7 +20,11 @@ package org.apache.hadoop.hive.ql.log;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 import org.apache.logging.log4j.core.Filter;
 import org.apache.logging.log4j.core.Layout;
 import org.apache.logging.log4j.core.LogEvent;
@@ -47,6 +51,17 @@ import org.apache.logging.log4j.core.util.Integers;
 public final class HushableRandomAccessFileAppender extends
     AbstractOutputStreamAppender<RandomAccessFileManager> {
 
+  private static final LoadingCache<String, String> CLOSED_FILES =
+      CacheBuilder.newBuilder().maximumSize(1000)
+          .expireAfterWrite(1, TimeUnit.SECONDS)
+          .build(new CacheLoader<String, String>() {
+            @Override
+            public String load(String key) throws Exception {
+              return key;
+            }
+          });
+
+
   private final String fileName;
   private Object advertisement;
   private final Advertiser advertiser;
@@ -71,6 +86,7 @@ public final class HushableRandomAccessFileAppender extends
   @Override
   public void stop() {
     super.stop();
+    CLOSED_FILES.put(fileName, fileName);
     if (advertiser != null) {
       advertiser.unadvertise(advertisement);
     }
@@ -172,6 +188,22 @@ public final class HushableRandomAccessFileAppender extends
           + name);
       return null;
     }
+
+    /**
+     * In corner cases (e.g. exceptions), there seems to be a race between
+     * com.lmax.disruptor.BatchEventProcessor and the HS2 thread that is
+     * stopping the logs. Because of this, the same file can be recreated
+     * and stop() would never be invoked on that instance, causing a memory
+     * leak. To prevent the same file from being recreated within a very
+     * short time, closed files are tracked in the CLOSED_FILES cache with
+     * a TTL of 1 second. This also helps avoid creating stale directories.
+     */
+    if (CLOSED_FILES.getIfPresent(fileName) != null) {
+      // Do not recreate a file that was closed within the last second
+      LOGGER.error(fileName + " was closed recently.");
+      return null;
+    }
+
     if (layout == null) {
       layout = PatternLayout.createDefaultLayout();
     }

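The guard added above is a common Guava idiom: a bounded LoadingCache whose entries expire shortly after being written behaves like a short-lived set of "recently closed" names. Below is a minimal, self-contained sketch of that pattern, not part of this commit; the class name and file path are made up for illustration.

import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class RecentlyClosedDemo {

  // Same shape as the patch: entries disappear one second after being
  // written, so the cache behaves like a short-TTL set of file names.
  private static final LoadingCache<String, String> CLOSED_FILES =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .expireAfterWrite(1, TimeUnit.SECONDS)
          .build(new CacheLoader<String, String>() {
            @Override
            public String load(String key) {
              return key;
            }
          });

  public static void main(String[] args) throws InterruptedException {
    String fileName = "/tmp/operation-logs/query-1"; // hypothetical path

    // stop() records the file name in the cache ...
    CLOSED_FILES.put(fileName, fileName);

    // ... so an immediate attempt to recreate it is rejected.
    System.out.println(CLOSED_FILES.getIfPresent(fileName) != null); // true

    // Once the 1-second TTL has elapsed, the entry is treated as absent
    // and the appender may be created again.
    Thread.sleep(1100);
    System.out.println(CLOSED_FILES.getIfPresent(fileName) != null); // false
  }
}

Guava treats expired entries as absent on read, so no background cleanup thread is needed; this is why a plain getIfPresent check in createAppender is enough to reject a name that stop() recorded within the last second.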