imay closed pull request #377: Support TRUNCATE TABLE stmt
URL: https://github.com/apache/incubator-doris/pull/377
 
 
   

This is a PR merged from a forked repository.
Because GitHub hides the original diff once a foreign (fork) pull request
is merged, the diff is reproduced below for the sake of provenance:

diff --git a/docs/help/Contents/Data Definition/ddl_stmt.md 
b/docs/help/Contents/Data Definition/ddl_stmt.md
index ad389ed7..2fd8eff2 100644
--- a/docs/help/Contents/Data Definition/ddl_stmt.md    
+++ b/docs/help/Contents/Data Definition/ddl_stmt.md    
@@ -960,3 +960,30 @@
 ## keyword
     HLL
 
+# TRUNCATE TABLE
+## description
+    该语句用于清空指定表和分区的数据
+    语法:
+
+        TRUNCATE TABLE [db.]tbl[ PARTITION(p1, p2, ...)];
+    
+    说明:
+        1. 该语句清空数据,但保留表或分区。
+        2. 不同于 DELETE,该语句只能整体清空指定的表或分区,不能添加过滤条件。
+        3. 不同于 DELETE,使用该方式清空数据不会对查询性能造成影响。
+        4. 该操作删除的数据不可恢复。
+        5. 使用该命令时,表状态需为 NORMAL,即不允许正在进行 SCHEMA CHANGE 等操作。
+        
+## example
+
+    1. 清空 example_db 下的表 tbl
+
+        TRUNCATE TABLE example_db.tbl;
+
+    2. 清空表 tbl 的 p1 和 p2 分区
+
+        TRUNCATE TABLE tbl PARTITION(p1, p2);
+
+## keyword
+    TRUNCATE,TABLE
+
diff --git a/fe/src/main/cup/sql_parser.cup b/fe/src/main/cup/sql_parser.cup
index a1e2c943..0577bce3 100644
--- a/fe/src/main/cup/sql_parser.cup
+++ b/fe/src/main/cup/sql_parser.cup
@@ -221,7 +221,7 @@ terminal String KW_ADD, KW_ADMIN, KW_AFTER, KW_AGGREGATE, 
KW_ALL, KW_ALTER, KW_A
     KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, 
KW_STORAGE, KW_STRING, 
     KW_SUM, KW_SUPERUSER, KW_SYNC, KW_SYSTEM, 
     KW_TABLE, KW_TABLES, KW_TABLET, KW_TERMINATED, KW_THAN, KW_THEN, 
KW_TIMESTAMP, KW_TINYINT,
-    KW_TO, KW_TRANSACTION, KW_TRIGGERS, KW_TRIM, KW_TRUE, KW_TYPE, KW_TYPES,
+    KW_TO, KW_TRANSACTION, KW_TRIGGERS, KW_TRIM, KW_TRUE, KW_TRUNCATE, 
KW_TYPE, KW_TYPES,
     KW_UNCOMMITTED, KW_UNBOUNDED, KW_UNION, KW_UNIQUE, KW_UNSIGNED, KW_USE, 
KW_USER, KW_USING,
     KW_VALUES, KW_VARCHAR, KW_VARIABLES, KW_VIEW,
     KW_WARNINGS, KW_WHEN, KW_WHITELIST, KW_WHERE, KW_WITH, KW_WORK, KW_WRITE;
@@ -242,7 +242,7 @@ terminal String COMMENTED_PLAN_HINTS;
 // Statement that the result of this parser.
 nonterminal StatementBase query, stmt, show_stmt, show_param, help_stmt, 
load_stmt, describe_stmt, alter_stmt,
     use_stmt, kill_stmt, drop_stmt, recover_stmt, grant_stmt, revoke_stmt, 
create_stmt, set_stmt, sync_stmt, cancel_stmt, cancel_param, delete_stmt,
-    link_stmt, migrate_stmt, enter_stmt, unsupported_stmt, export_stmt, 
admin_stmt, import_columns_stmt, import_where_stmt;
+    link_stmt, migrate_stmt, enter_stmt, unsupported_stmt, export_stmt, 
admin_stmt, truncate_stmt, import_columns_stmt, import_where_stmt;
 
 nonterminal ImportColumnDesc import_column_desc;
 nonterminal List<ImportColumnDesc> import_column_descs;
@@ -526,6 +526,8 @@ stmt ::=
     {: RESULT = stmt; :}
     | admin_stmt : stmt
     {: RESULT = stmt; :}
+    | truncate_stmt : stmt
+    {: RESULT = stmt; :}
     | /* empty: query only has comments */
     {:
         RESULT = new EmptyStmt();
@@ -3482,6 +3484,13 @@ admin_stmt ::=
     :}
     ;
 
+truncate_stmt ::=
+    KW_TRUNCATE KW_TABLE base_table_ref:tblRef
+    {:
+        RESULT = new TruncateTableStmt(tblRef);
+    :}
+    ;
+
 unsupported_stmt ::=
     KW_START KW_TRANSACTION opt_with_consistent_snapshot:v
     {:
@@ -3727,6 +3736,8 @@ keyword ::=
     {: RESULT = id; :}
     | KW_TRIGGERS:id
     {: RESULT = id; :}
+    | KW_TRUNCATE:id
+    {: RESULT = id; :}
     | KW_TYPE:id
     {: RESULT = id; :}
     | KW_TYPES:id
diff --git a/fe/src/main/java/org/apache/doris/analysis/DropTableStmt.java 
b/fe/src/main/java/org/apache/doris/analysis/DropTableStmt.java
index e53d8024..186ab3cb 100644
--- a/fe/src/main/java/org/apache/doris/analysis/DropTableStmt.java
+++ b/fe/src/main/java/org/apache/doris/analysis/DropTableStmt.java
@@ -67,8 +67,8 @@ public void analyze(Analyzer analyzer) throws 
AnalysisException, UserException {
             tableName.setDb(analyzer.getDefaultDb());
         }
         tableName.analyze(analyzer);
-        // check access
 
+        // check access
         if 
(!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), 
tableName.getDb(),
                                                                 
tableName.getTbl(), PrivPredicate.DROP)) {
             
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, 
"DROP");
diff --git a/fe/src/main/java/org/apache/doris/analysis/TruncateTableStmt.java 
b/fe/src/main/java/org/apache/doris/analysis/TruncateTableStmt.java
new file mode 100644
index 00000000..118fdda6
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/analysis/TruncateTableStmt.java
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.analysis;
+
+import org.apache.doris.catalog.Catalog;
+import org.apache.doris.common.AnalysisException;
+import org.apache.doris.common.ErrorCode;
+import org.apache.doris.common.ErrorReport;
+import org.apache.doris.common.UserException;
+import org.apache.doris.mysql.privilege.PrivPredicate;
+import org.apache.doris.qe.ConnectContext;
+
+import com.google.common.base.Joiner;
+
+// TRUNCATE TABLE tbl [PARTITION(p1, p2, ...)]
+public class TruncateTableStmt extends DdlStmt {
+
+    private TableRef tblRef;
+
+    public TruncateTableStmt(TableRef tblRef) {
+        this.tblRef = tblRef;
+    }
+
+    public TableRef getTblRef() {
+        return tblRef;
+    }
+
+    @Override
+    public void analyze(Analyzer analyzer) throws AnalysisException, 
UserException {
+        super.analyze(analyzer);
+        tblRef.getName().analyze(analyzer);
+
+        if (tblRef.hasExplicitAlias()) {
+            throw new AnalysisException("Not support truncate table with 
alias");
+        }
+
+        // check access
+        // it requires LOAD privilege, because we consider this operation as 
'delete data', which is also a
+        // 'load' operation.
+        if 
(!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), 
tblRef.getName().getDb(),
+                tblRef.getName().getTbl(), PrivPredicate.LOAD)) {
+            
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, 
"LOAD");
+        }
+    }
+
+    @Override
+    public String toSql() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("TRUNCATE TABLE ");
+        sb.append(tblRef.getName().toSql());
+        if (tblRef.getPartitions() != null && 
!tblRef.getPartitions().isEmpty()) {
+            sb.append(" PARTITION (");
+            sb.append(Joiner.on(", ").join(tblRef.getPartitions()));
+            sb.append(")");
+        }
+        return sb.toString();
+    }
+
+}
diff --git a/fe/src/main/java/org/apache/doris/backup/RestoreJob.java 
b/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
index 6ac67862..f59a49f6 100644
--- a/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
+++ b/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
@@ -365,7 +365,7 @@ private void checkAndPrepareMeta() {
                 }
                 
                 if (tbl.getType() != TableType.OLAP) {
-                    status = new Status(ErrCode.COMMON_ERROR, "Only support 
retore olap table: " + tbl.getName());
+                    status = new Status(ErrCode.COMMON_ERROR, "Only support 
retore OLAP table: " + tbl.getName());
                     return;
                 }
                 
@@ -662,7 +662,7 @@ private void checkAndPrepareMeta() {
                         long remotePartId = backupPartitionInfo.id;
                         Range<PartitionKey> remoteRange = 
remotePartitionInfo.getRange(remotePartId);
                         DataProperty remoteDataProperty = 
remotePartitionInfo.getDataProperty(remotePartId);
-                        
localPartitionInfo.addPartitionForRestore(restoredPart.getId(), remoteRange,
+                        localPartitionInfo.addPartition(restoredPart.getId(), 
remoteRange,
                                                                   
remoteDataProperty, (short) restoreReplicationNum);
                         localTbl.addPartition(restoredPart);
                     }
@@ -857,7 +857,7 @@ private void replayCheckAndPrepareMeta() {
                 long remotePartId = backupPartitionInfo.id;
                 Range<PartitionKey> remoteRange = 
remotePartitionInfo.getRange(remotePartId);
                 DataProperty remoteDataProperty = 
remotePartitionInfo.getDataProperty(remotePartId);
-                localPartitionInfo.addPartitionForRestore(restorePart.getId(), 
remoteRange,
+                localPartitionInfo.addPartition(restorePart.getId(), 
remoteRange,
                                                           remoteDataProperty, 
(short) restoreReplicationNum);
                 localTbl.addPartition(restorePart);
 
diff --git a/fe/src/main/java/org/apache/doris/catalog/Catalog.java 
b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
index 4e6b700e..c354ea2a 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -17,20 +17,6 @@
 
 package org.apache.doris.catalog;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Joiner.MapJoiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Range;
-import com.google.common.collect.Sets;
-import com.sleepycat.je.rep.InsufficientLogException;
-import com.sleepycat.je.rep.NetworkRestore;
-import com.sleepycat.je.rep.NetworkRestoreConfig;
-
 import org.apache.doris.alter.Alter;
 import org.apache.doris.alter.AlterJob;
 import org.apache.doris.alter.AlterJob.JobType;
@@ -75,7 +61,10 @@
 import org.apache.doris.analysis.RollupRenameClause;
 import org.apache.doris.analysis.ShowAlterStmt.AlterType;
 import org.apache.doris.analysis.SingleRangePartitionDesc;
+import org.apache.doris.analysis.TableName;
+import org.apache.doris.analysis.TableRef;
 import org.apache.doris.analysis.TableRenameClause;
+import org.apache.doris.analysis.TruncateTableStmt;
 import org.apache.doris.analysis.UserDesc;
 import org.apache.doris.analysis.UserIdentity;
 import org.apache.doris.backup.AbstractBackupJob_D;
@@ -156,6 +145,7 @@
 import org.apache.doris.persist.Storage;
 import org.apache.doris.persist.StorageInfo;
 import org.apache.doris.persist.TableInfo;
+import org.apache.doris.persist.TruncateTableInfo;
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.qe.JournalObservable;
 import org.apache.doris.qe.SessionVariable;
@@ -176,6 +166,21 @@
 import org.apache.doris.thrift.TTaskType;
 import org.apache.doris.transaction.GlobalTransactionMgr;
 import org.apache.doris.transaction.PublishVersionDaemon;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Joiner.MapJoiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Range;
+import com.google.common.collect.Sets;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.NetworkRestore;
+import com.sleepycat.je.rep.NetworkRestoreConfig;
+
 import org.apache.kudu.ColumnSchema;
 import org.apache.kudu.Schema;
 import org.apache.kudu.client.CreateTableOptions;
@@ -2814,8 +2819,8 @@ public void addPartition(Database db, String tableName, 
AddPartitionClause addPa
                 }
 
                 // check if meta changed
-                // rollup index may be added or dropped during add partition op
-                // schema may be changed during add partition op
+                // rollup index may be added or dropped during add partition 
operation.
+                // schema may be changed during add partition operation.
                 boolean metaChanged = false;
                 if (olapTable.getIndexNameToId().size() != 
indexIdToSchema.size()) {
                     metaChanged = true;
@@ -3823,7 +3828,6 @@ public void replayCreateTable(String dbName, Table table) 
{
                 } // end for partitions
             }
         }
-
     }
 
     private void createTablets(String clusterName, MaterializedIndex index, 
ReplicaState replicaState,
@@ -5501,5 +5505,217 @@ public String dumpImage() {
         LOG.info("finished dumpping image to {}", dumpFilePath);
         return dumpFilePath;
     }
+
+    /*
+     * Truncate specified table or partitions.
+     * The main idea is:
+     * 
+     * 1. using the same schema to create new table(partitions)
+     * 2. use the new created table(partitions) to replace the old ones.
+     * 
+     */
+    public void truncateTable(TruncateTableStmt truncateTableStmt) throws 
DdlException {
+        TableRef tblRef = truncateTableStmt.getTblRef();
+        TableName dbTbl = tblRef.getName();
+
+        // check, and save some info which need to be checked again later
+        Map<String, Long> origPartitions = Maps.newHashMap();
+        OlapTable copiedTbl = null;
+        Database db = getDb(dbTbl.getDb());
+        if (db == null) {
+            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, 
dbTbl.getDb());
+        }
+
+        db.readLock();
+        try {
+            Table table = db.getTable(dbTbl.getTbl());
+            if (table == null) {
+                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, 
dbTbl.getTbl());
+            }
+
+            if (table.getType() != TableType.OLAP) {
+                throw new DdlException("Only support truncate OLAP table");
+            }
+
+            OlapTable olapTable = (OlapTable) table;
+            if (olapTable.getState() != OlapTableState.NORMAL) {
+                throw new DdlException("Table' state is not NORMAL: " + 
olapTable.getState());
+            }
+            
+            if (tblRef.getPartitions() != null && 
!tblRef.getPartitions().isEmpty()) {
+                for (String partName: tblRef.getPartitions()) {
+                    Partition partition = olapTable.getPartition(partName);
+                    if (partition == null) {
+                        throw new DdlException("Partition " + partName + " 
does not exist");
+                    }
+                    
+                    origPartitions.put(partName, partition.getId());
+                }
+            } else {
+                for (Partition partition : olapTable.getPartitions()) {
+                    origPartitions.put(partition.getName(), partition.getId());
+                }
+            }
+            
+            copiedTbl = olapTable.selectiveCopy(origPartitions.keySet());
+
+        } finally {
+            db.readUnlock();
+        }
+        
+        // 2. use the copied table to create partitions
+        List<Partition> newPartitions = Lists.newArrayList();
+        // tabletIdSet to save all newly created tablet ids.
+        Set<Long> tabletIdSet = Sets.newHashSet();
+        try {
+            for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
+                // the new partition must use new id
+                // If we still use the old partition id, the behavior of 
current load jobs on this partition
+                // will be undefined.
+                // By using a new id, load job will be aborted(just like 
partition is dropped),
+                // which is the right behavior.
+                long oldPartitionId = entry.getValue();
+                long newPartitionId = getNextId();
+                Partition newPartition = 
createPartitionWithIndices(db.getClusterName(),
+                        db.getId(), copiedTbl.getId(), newPartitionId,
+                        entry.getKey(),
+                        copiedTbl.getIndexIdToShortKeyColumnCount(),
+                        copiedTbl.getIndexIdToSchemaHash(),
+                        copiedTbl.getIndexIdToStorageType(),
+                        copiedTbl.getIndexIdToSchema(),
+                        copiedTbl.getKeysType(),
+                        copiedTbl.getDefaultDistributionInfo(),
+                        
copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).getStorageMedium(),
+                        
copiedTbl.getPartitionInfo().getReplicationNum(oldPartitionId),
+                        null /* version info */,
+                        copiedTbl.getCopiedBfColumns(),
+                        copiedTbl.getBfFpp(),
+                        tabletIdSet,
+                        false /* not restore */);
+                newPartitions.add(newPartition);
+            }
+        } catch (DdlException e) {
+            // create partition failed, remove all newly created tablets
+            for (Long tabletId : tabletIdSet) {
+                Catalog.getCurrentInvertedIndex().deleteTablet(tabletId);
+            }
+            throw e;
+        }
+        Preconditions.checkState(origPartitions.size() == 
newPartitions.size());
+
+        // all partitions are created successfully, try to replace the old 
partitions.
+        // before replacing, we need to check again.
+        // Things may be changed outside the database lock.
+        db.writeLock();
+        try {
+            OlapTable olapTable = (OlapTable) db.getTable(copiedTbl.getId());
+            if (olapTable == null) {
+                throw new DdlException("Table[" + copiedTbl.getName() + "] is 
dropped");
+            }
+            
+            if (olapTable.getState() != OlapTableState.NORMAL) {
+                throw new DdlException("Table' state is not NORMAL: " + 
olapTable.getState());
+            }
+
+            // check partitions
+            for (Map.Entry<String, Long> entry : origPartitions.entrySet()) {
+                Partition partition = copiedTbl.getPartition(entry.getValue());
+                if (partition == null || 
!partition.getName().equals(entry.getKey())) {
+                    throw new DdlException("Partition [" + entry.getKey() + "] 
is changed");
+                }
+            }
+
+            // check if meta changed
+            // rollup index may be added or dropped, and schema may be changed 
during creating partition operation.
+            boolean metaChanged = false;
+            if (olapTable.getIndexNameToId().size() != 
copiedTbl.getIndexNameToId().size()) {
+                metaChanged = true;
+            } else {
+                // compare schemaHash
+                Map<Long, Integer> copiedIndexIdToSchemaHash = 
copiedTbl.getIndexIdToSchemaHash();
+                for (Map.Entry<Long, Integer> entry : 
olapTable.getIndexIdToSchemaHash().entrySet()) {
+                    long indexId = entry.getKey();
+                    if (!copiedIndexIdToSchemaHash.containsKey(indexId)) {
+                        metaChanged = true;
+                        break;
+                    }
+                    if 
(!copiedIndexIdToSchemaHash.get(indexId).equals(entry.getValue())) {
+                        metaChanged = true;
+                        break;
+                    }
+                }
+            }
+
+            if (metaChanged) {
+                throw new DdlException("Table[" + copiedTbl.getName() + "]'s 
meta has been changed. try again.");
+            }
+
+            // replace
+            truncateTableInternal(olapTable, newPartitions);
+
+            // write edit log
+            TruncateTableInfo info = new TruncateTableInfo(db.getId(), 
olapTable.getId(), newPartitions);
+            editLog.logTruncateTable(info);
+        } finally {
+            db.writeUnlock();
+        }
+        
+        LOG.info("finished to truncate table {}, partitions: {}",
+                tblRef.getName().toSql(), tblRef.getPartitions());
+    }
+
+    private void truncateTableInternal(OlapTable olapTable, List<Partition> 
newPartitions) {
+        // use new partitions to replace the old ones.
+        Set<Long> oldTabletIds = Sets.newHashSet();
+        for (Partition newPartition : newPartitions) {
+            Partition oldPartition = olapTable.replacePartition(newPartition);
+            // save old tablets to be removed
+            for (MaterializedIndex index : 
oldPartition.getMaterializedIndices()) {
+                index.getTablets().stream().forEach(t -> {
+                    oldTabletIds.add(t.getId());
+                });
+            }
+        }
+
+        // remove the tablets in old partitions
+        for (Long tabletId : oldTabletIds) {
+            Catalog.getCurrentInvertedIndex().deleteTablet(tabletId);
+        }
+    }
+
+    public void replayTruncateTable(TruncateTableInfo info) {
+        Database db = getDb(info.getDbId());
+        db.writeLock();
+        try {
+            OlapTable olapTable = (OlapTable) db.getTable(info.getTblId());
+            truncateTableInternal(olapTable, info.getPartitions());
+
+            // if this is checkpoint thread, no need to handle inverted index
+            // because tablet and replica info are already in catalog, and 
inverted index will be rebuild
+            // when loading image
+            if (!Catalog.isCheckpointThread()) {
+                // add tablet to inverted index
+                TabletInvertedIndex invertedIndex = 
Catalog.getCurrentInvertedIndex();
+                for (Partition partition : info.getPartitions()) {
+                    long partitionId = partition.getId();
+                    for (MaterializedIndex mIndex : 
partition.getMaterializedIndices()) {
+                        long indexId = mIndex.getId();
+                        int schemaHash = 
olapTable.getSchemaHashByIndexId(indexId);
+                        TabletMeta tabletMeta = new TabletMeta(db.getId(), 
olapTable.getId(),
+                                partitionId, indexId, schemaHash);
+                        for (Tablet tablet : mIndex.getTablets()) {
+                            long tabletId = tablet.getId();
+                            invertedIndex.addTablet(tabletId, tabletMeta);
+                            for (Replica replica : tablet.getReplicas()) {
+                                invertedIndex.addReplica(tabletId, replica);
+                            }
+                        }
+                    }
+                }
+            }
+        } finally {
+            db.writeUnlock();
+        }
+    }
 }
 
diff --git a/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java 
b/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
index ef370e0b..6f00557d 100644
--- a/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
+++ b/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
@@ -558,7 +558,7 @@ public synchronized void replayRecoverPartition(OlapTable 
table, long partitionI
     // no need to use synchronized.
     // only called when loading image
     public void addTabletToInvertedIndex() {
-        // no need to handle idToDatabase. Databse is already empty before 
being put here
+        // no need to handle idToDatabase. Database is already empty before 
being put here
 
         TabletInvertedIndex invertedIndex = Catalog.getCurrentInvertedIndex();
         // idToTable
@@ -595,12 +595,12 @@ public void addTabletToInvertedIndex() {
             Partition partition = partitionInfo.getPartition();
             long partitionId = partition.getId();
 
-            // we need to get olaptable to get schema hash info
+            // we need to get olap table to get schema hash info
             // first find it in catalog. if not found, it should be in recycle 
bin
             OlapTable olapTable = null;
             Database db = Catalog.getInstance().getDb(dbId);
             if (db == null) {
-                // just log. db should be in recyle bin
+                // just log. db should be in recycle bin
                 if (!idToDatabase.containsKey(dbId)) {
                     LOG.error("db[{}] is neither in catalog nor in recylce bin"
                             + " when rebuilding inverted index from recycle 
bin, partition[{}]",
diff --git a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java 
b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
index 96c13738..b54e0383 100644
--- a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -25,9 +25,6 @@
 import org.apache.doris.analysis.AlterTableStmt;
 import org.apache.doris.analysis.CreateTableStmt;
 import org.apache.doris.analysis.DistributionDesc;
-import org.apache.doris.analysis.KeysDesc;
-import org.apache.doris.analysis.PartitionDesc;
-import org.apache.doris.analysis.RangePartitionDesc;
 import org.apache.doris.analysis.SingleRangePartitionDesc;
 import org.apache.doris.analysis.TableName;
 import org.apache.doris.backup.Status;
@@ -44,10 +41,10 @@
 import org.apache.doris.thrift.TTableDescriptor;
 import org.apache.doris.thrift.TTableType;
 
-import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Range;
 import com.google.common.collect.Sets;
 
 import org.apache.logging.log4j.LogManager;
@@ -885,4 +882,33 @@ public OlapTable selectiveCopy(Collection<String> 
reservedPartNames) {
         
         return copied;
     }
+
+    /*
+     * this method is currently used for truncating table(partitions).
+     * the new partition has new id, so we need to change all 'id-related' 
members
+     *
+     * return the old partition.
+     */
+    public Partition replacePartition(Partition newPartition) {
+        Partition oldPartition = 
nameToPartition.remove(newPartition.getName());
+        idToPartition.remove(oldPartition.getId());
+
+        idToPartition.put(newPartition.getId(), newPartition);
+        nameToPartition.put(newPartition.getName(), newPartition);
+
+        DataProperty dataProperty = 
partitionInfo.getDataProperty(oldPartition.getId());
+        short replicationNum = 
partitionInfo.getReplicationNum(oldPartition.getId());
+
+        if (partitionInfo.getType() == PartitionType.RANGE) {
+            RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) 
partitionInfo;
+            Range<PartitionKey> range = 
rangePartitionInfo.getRange(oldPartition.getId());
+            rangePartitionInfo.dropPartition(oldPartition.getId());
+            rangePartitionInfo.addPartition(newPartition.getId(), range, 
dataProperty, replicationNum);
+        } else {
+            partitionInfo.dropPartition(oldPartition.getId());
+            partitionInfo.addPartition(newPartition.getId(), dataProperty, 
replicationNum);
+        }
+
+        return oldPartition;
+    }
 }
diff --git a/fe/src/main/java/org/apache/doris/catalog/PartitionInfo.java 
b/fe/src/main/java/org/apache/doris/catalog/PartitionInfo.java
index bc557de3..3028ce8a 100644
--- a/fe/src/main/java/org/apache/doris/catalog/PartitionInfo.java
+++ b/fe/src/main/java/org/apache/doris/catalog/PartitionInfo.java
@@ -71,6 +71,16 @@ public void setReplicationNum(long partitionId, short 
replicationNum) {
         idToReplicationNum.put(partitionId, replicationNum);
     }
 
+    public void dropPartition(long partitionId) {
+        idToDataProperty.remove(partitionId);
+        idToReplicationNum.remove(partitionId);
+    }
+
+    public void addPartition(long partitionId, DataProperty dataProperty, 
short replicationNum) {
+        idToDataProperty.put(partitionId, dataProperty);
+        idToReplicationNum.put(partitionId, replicationNum);
+    }
+
     public static PartitionInfo read(DataInput in) throws IOException {
         PartitionInfo partitionInfo = new PartitionInfo();
         partitionInfo.readFields(in);
diff --git a/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java 
b/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
index 22c5f1d5..118cb22e 100644
--- a/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
+++ b/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
@@ -79,17 +79,16 @@ public RangePartitionInfo(List<Column> partitionColumns) {
         return partitionColumns;
     }
 
+    @Override
     public void dropPartition(long partitionId) {
+        super.dropPartition(partitionId);
         idToRange.remove(partitionId);
-        idToDataProperty.remove(partitionId);
-        idToReplicationNum.remove(partitionId);
     }
 
-    public void addPartitionForRestore(long partitionId, Range<PartitionKey> 
range, DataProperty dataProperty,
+    public void addPartition(long partitionId, Range<PartitionKey> range, 
DataProperty dataProperty,
             short replicationNum) {
+        addPartition(partitionId, dataProperty, replicationNum);
         idToRange.put(partitionId, range);
-        idToDataProperty.put(partitionId, dataProperty);
-        idToReplicationNum.put(partitionId, replicationNum);
     }
 
     public Range<PartitionKey> checkAndCreateRange(SingleRangePartitionDesc 
desc) throws DdlException {
diff --git a/fe/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java 
b/fe/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
index 661b44cd..d6e15899 100644
--- a/fe/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
+++ b/fe/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
@@ -331,8 +331,8 @@ private boolean checkNeedRecover(Replica replicaMeta, long 
backendVersion, long
         return false;
     }
 
+    // always add tablet before adding replicas
     public void addTablet(long tabletId, TabletMeta tabletMeta) {
-        // always add tablet before adding replicas
         if (Catalog.isCheckpointThread()) {
             return;
         }
diff --git a/fe/src/main/java/org/apache/doris/catalog/TabletStatMgr.java 
b/fe/src/main/java/org/apache/doris/catalog/TabletStatMgr.java
index ee554057..1d327347 100644
--- a/fe/src/main/java/org/apache/doris/catalog/TabletStatMgr.java
+++ b/fe/src/main/java/org/apache/doris/catalog/TabletStatMgr.java
@@ -146,9 +146,13 @@ private void updateTabletStat(Long beId, TTabletStatResult 
result) {
         TabletInvertedIndex invertedIndex = Catalog.getCurrentInvertedIndex();
 
         for (Map.Entry<Long, TTabletStat> entry : 
result.getTablets_stats().entrySet()) {
+            if (invertedIndex.getTabletMeta(entry.getKey()) == null) {
+                // the replica is obsolete, ignore it.
+                continue;
+            }
             Replica replica = invertedIndex.getReplica(entry.getKey(), beId);
             if (replica == null) {
-                // replica may be deleted from catalog
+                // replica may be deleted from catalog, ignore it.
                 continue;
             }
             // TODO(cmy) no db lock protected. I think it is ok even we get 
wrong row num
diff --git a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java 
b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
index 29c0544b..70888ee2 100644
--- a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
+++ b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
@@ -56,6 +56,7 @@
 import org.apache.doris.persist.RecoverInfo;
 import org.apache.doris.persist.ReplicaPersistInfo;
 import org.apache.doris.persist.TableInfo;
+import org.apache.doris.persist.TruncateTableInfo;
 import org.apache.doris.qe.SessionVariable;
 import org.apache.doris.system.Backend;
 import org.apache.doris.system.Frontend;
@@ -366,6 +367,11 @@ public void readFields(DataInput in) throws IOException {
                 data = new Text();
                 break;
             }
+            case OperationType.OP_TRUNCATE_TABLE: {
+                data = TruncateTableInfo.read(in);
+                needRead = false;
+                break;
+            }
             default: {
                 IOException e = new IOException();
                 LOG.error("UNKNOWN Operation Type {}", opCode, e);
diff --git a/fe/src/main/java/org/apache/doris/persist/EditLog.java b/fe/src/main/java/org/apache/doris/persist/EditLog.java
index 63d3c593..ac48b1ba 100644
--- a/fe/src/main/java/org/apache/doris/persist/EditLog.java
+++ b/fe/src/main/java/org/apache/doris/persist/EditLog.java
@@ -57,6 +57,7 @@
 import org.apache.doris.system.Backend;
 import org.apache.doris.system.Frontend;
 import org.apache.doris.transaction.TransactionState;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -579,14 +580,14 @@ public static void loadJournal(Catalog catalog, JournalEntity journal) {
                 }
                 case OperationType.OP_UPSERT_TRANSACTION_STATE: {
                     final TransactionState state = (TransactionState) journal.getData();
-                    catalog.getCurrentGlobalTransactionMgr().replayUpsertTransactionState(state);
+                    Catalog.getCurrentGlobalTransactionMgr().replayUpsertTransactionState(state);
                     LOG.debug("opcode: {}, tid: {}", opCode, state.getTransactionId());
 
                     break;
                 }
                 case OperationType.OP_DELETE_TRANSACTION_STATE: {
                     final TransactionState state = (TransactionState) journal.getData();
-                    catalog.getCurrentGlobalTransactionMgr().replayDeleteTransactionState(state);
+                    Catalog.getCurrentGlobalTransactionMgr().replayDeleteTransactionState(state);
                     LOG.debug("opcode: {}, tid: {}", opCode, state.getTransactionId());
                     break;
                 }
@@ -600,6 +601,11 @@ public static void loadJournal(Catalog catalog, JournalEntity journal) {
                     
catalog.getBackupHandler().getRepoMgr().removeRepo(repoName, true);
                     break;
                 }
+                case OperationType.OP_TRUNCATE_TABLE: {
+                    TruncateTableInfo info = (TruncateTableInfo) journal.getData();
+                    catalog.replayTruncateTable(info);
+                    break;
+                }
                 default: {
                     IOException e = new IOException();
                     LOG.error("UNKNOWN Operation Type {}", opCode, e);
@@ -1054,4 +1060,8 @@ public void logRestoreJob(RestoreJob job) {
     public void logUpdateUserProperty(UserPropertyInfo propertyInfo) {
         logEdit(OperationType.OP_UPDATE_USER_PROPERTY, propertyInfo);
     }
+
+    public void logTruncateTable(TruncateTableInfo info) {
+        logEdit(OperationType.OP_TRUNCATE_TABLE, info);
+    }
 }
diff --git a/fe/src/main/java/org/apache/doris/persist/OperationType.java b/fe/src/main/java/org/apache/doris/persist/OperationType.java
index 491ab4c7..6fdad60b 100644
--- a/fe/src/main/java/org/apache/doris/persist/OperationType.java
+++ b/fe/src/main/java/org/apache/doris/persist/OperationType.java
@@ -51,6 +51,7 @@
     public static final short OP_RESTORE_FINISH = 115;
     public static final short OP_BACKUP_JOB = 116;
     public static final short OP_RESTORE_JOB = 117;
+    public static final short OP_TRUNCATE_TABLE = 118;
 
     // 20~29 120~129 220~229 ...
     public static final short OP_START_ROLLUP = 20;
diff --git a/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java b/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java
new file mode 100644
index 00000000..bad83e9c
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java
@@ -0,0 +1,85 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.persist;
+
+import org.apache.doris.catalog.Partition;
+import org.apache.doris.common.io.Writable;
+
+import com.google.common.collect.Lists;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+public class TruncateTableInfo implements Writable {
+
+    private long dbId;
+    private long tblId;
+    private List<Partition> partitions = Lists.newArrayList();
+
+    private TruncateTableInfo() {
+
+    }
+
+    public TruncateTableInfo(long dbId, long tblId, List<Partition> partitions) {
+        this.dbId = dbId;
+        this.tblId = tblId;
+        this.partitions = partitions;
+    }
+
+    public long getDbId() {
+        return dbId;
+    }
+
+    public long getTblId() {
+        return tblId;
+    }
+
+    public List<Partition> getPartitions() {
+        return partitions;
+    }
+
+    public static TruncateTableInfo read(DataInput in) throws IOException {
+        TruncateTableInfo info = new TruncateTableInfo();
+        info.readFields(in);
+        return info;
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        out.writeLong(dbId);
+        out.writeLong(tblId);
+        out.writeInt(partitions.size());
+        for (Partition partition : partitions) {
+            partition.write(out);
+        }
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        dbId = in.readLong();
+        tblId = in.readLong();
+        int size = in.readInt();
+        for (int i = 0; i < size; i++) {
+            Partition partition = Partition.read(in);
+            partitions.add(partition);
+        }
+    }
+
+}
diff --git a/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java b/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java
index 1ef1d33b..778e05ef 100644
--- a/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java
+++ b/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java
@@ -53,6 +53,7 @@
 import org.apache.doris.analysis.RevokeStmt;
 import org.apache.doris.analysis.SetUserPropertyStmt;
 import org.apache.doris.analysis.SyncStmt;
+import org.apache.doris.analysis.TruncateTableStmt;
 import org.apache.doris.catalog.Catalog;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.DdlException;
@@ -151,6 +152,8 @@ public static void execute(Catalog catalog, DdlStmt ddlStmt) throws DdlException
             catalog.getBackupHandler().dropRepository((DropRepositoryStmt) ddlStmt);
         } else if (ddlStmt instanceof SyncStmt) {
             return;
+        } else if (ddlStmt instanceof TruncateTableStmt) {
+            catalog.truncateTable((TruncateTableStmt) ddlStmt);
         } else {
             throw new DdlException("Unknown statement.");
         }
diff --git a/fe/src/main/jflex/sql_scanner.flex b/fe/src/main/jflex/sql_scanner.flex
index bdc878b3..701b2db4 100644
--- a/fe/src/main/jflex/sql_scanner.flex
+++ b/fe/src/main/jflex/sql_scanner.flex
@@ -284,6 +284,7 @@ import org.apache.doris.common.util.SqlUtils;
         keywordMap.put("triggers", new Integer(SqlParserSymbols.KW_TRIGGERS));
         keywordMap.put("trim", new Integer(SqlParserSymbols.KW_TRIM));
         keywordMap.put("true", new Integer(SqlParserSymbols.KW_TRUE));
+        keywordMap.put("truncate", new Integer(SqlParserSymbols.KW_TRUNCATE));
         keywordMap.put("type", new Integer(SqlParserSymbols.KW_TYPE));
         keywordMap.put("types", new Integer(SqlParserSymbols.KW_TYPES));
        keywordMap.put("unbounded", new Integer(SqlParserSymbols.KW_UNBOUNDED));


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to