This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 271f25f  [Bug] Fix bug that descriptor table is not reset before planning next routine load task (#3605)
271f25f is described below

commit 271f25f0a4e98c3d9130c0772bc386e7786cbae4
Author: Mingyu Chen <[email protected]>
AuthorDate: Mon May 18 10:34:21 2020 +0800

    [Bug] Fix bug that descriptor table is not reset before planning next routine load task (#3605)
    
    Before planning the next routine load task, the analyzer and the
    descriptor table in it should be reset. Otherwise, historical objects
    accumulate inside them, causing a memory leak.
---
 fe/src/main/java/org/apache/doris/analysis/DescriptorTable.java  | 6 ++----
 fe/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java | 7 ++++++-
 2 files changed, 8 insertions(+), 5 deletions(-)
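
For illustration, here is a minimal, self-contained Java sketch of the
reuse-with-reset pattern this fix adopts. Every name in it (TaskPlanner,
AnalysisState) is a hypothetical stand-in rather than a Doris API; the
point is only that state rebuilt at the start of each plan() call can be
garbage-collected between tasks, whereas state built once in the
constructor keeps every task's objects reachable.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-in for the per-plan analysis state
    // (the analyzer plus its descriptor table).
    class AnalysisState {
        final List<String> descriptors = new ArrayList<String>();
    }

    public class TaskPlanner {
        private AnalysisState state;

        // Mirrors resetAnalyzer(): rebuild the state for every plan()
        // call, so the previous task's descriptors become unreachable.
        // The leaky variant assigned "state = new AnalysisState();" once
        // in the constructor, keeping all historical descriptors alive.
        private void resetState() {
            state = new AnalysisState();
        }

        public int plan(String taskId) {
            resetState();
            state.descriptors.add("tuple-desc-for-" + taskId);
            return state.descriptors.size(); // stays at 1, task after task
        }

        public static void main(String[] args) {
            TaskPlanner planner = new TaskPlanner();
            for (int i = 0; i < 3; i++) {
                System.out.println("task " + i + ": " + planner.plan("t" + i));
            }
        }
    }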

diff --git a/fe/src/main/java/org/apache/doris/analysis/DescriptorTable.java b/fe/src/main/java/org/apache/doris/analysis/DescriptorTable.java
index 00040b1..4b68673 100644
--- a/fe/src/main/java/org/apache/doris/analysis/DescriptorTable.java
+++ b/fe/src/main/java/org/apache/doris/analysis/DescriptorTable.java
@@ -41,17 +41,15 @@ import java.util.List;
 public class DescriptorTable {
     private final static Logger LOG = LogManager.getLogger(DescriptorTable.class);
 
-    private final HashMap<TupleId, TupleDescriptor> tupleDescs;
+    private final HashMap<TupleId, TupleDescriptor> tupleDescs = new HashMap<TupleId, TupleDescriptor>();
     // List of referenced tables with no associated TupleDescriptor to ship to the BE.
     // For example, the output table of an insert query.
-    private final List<Table>                       referencedTables;
+    private final List<Table> referencedTables = new ArrayList<Table>();
     private final IdGenerator<TupleId> tupleIdGenerator_ = TupleId.createGenerator();
     private final IdGenerator<SlotId> slotIdGenerator_ = SlotId.createGenerator();
     private final HashMap<SlotId, SlotDescriptor> slotDescs = Maps.newHashMap();
 
     public DescriptorTable() {
-        tupleDescs = new HashMap<TupleId, TupleDescriptor>();
-        referencedTables = new ArrayList<Table>();
     }
 
     public TupleDescriptor createTupleDescriptor() {
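
As an aside on the hunk above: this part is a behavior-preserving Java
refactor. A final field's initializer runs as part of every constructor,
so moving the assignment from the constructor body to the declaration
leaves the now-empty constructor correct. A minimal sketch with
hypothetical class names:

    import java.util.ArrayList;
    import java.util.List;

    class ConstructorInit {
        private final List<String> items;

        ConstructorInit() {
            items = new ArrayList<String>(); // blank final, assigned here
        }
    }

    class DeclarationInit {
        // Equivalent: the field initializer executes during construction,
        // so an empty constructor (or none at all) suffices.
        private final List<String> items = new ArrayList<String>();
    }
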
diff --git a/fe/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java b/fe/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
index 6bc92a9..bc96844 100644
--- a/fe/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
+++ b/fe/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java
@@ -77,19 +77,24 @@ public class StreamLoadPlanner {
         this.db = db;
         this.destTable = destTable;
         this.streamLoadTask = streamLoadTask;
-        analyzer = new Analyzer(Catalog.getInstance(), null);
+    }
+
+    private void resetAnalyzer() {
+        analyzer = new Analyzer(Catalog.getCurrentCatalog(), null);
         // TODO(cmy): currently we do not support UDF in stream load command.
         // Because there is no way to check the privilege of accessing UDF.
         analyzer.setUDFAllowed(false);
         descTable = analyzer.getDescTbl();
     }
 
+    // can only be called after "plan()", or it will return null
     public OlapTable getDestTable() {
         return destTable;
     }
 
     // create the plan. the plan's query id and load id are the same, using the parameter 'loadId'
     public TExecPlanFragmentParams plan(TUniqueId loadId) throws UserException {
+        resetAnalyzer();
         // construct tuple descriptor, used for scanNode and dataSink
         TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DstTableTuple");
         boolean negative = streamLoadTask.getNegative();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
