This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git


The following commit(s) were added to refs/heads/master by this push:
     new bd5d68f0f refact(rocksdb): clean & reformat some code (#2200)
bd5d68f0f is described below

commit bd5d68f0f2cca3470473453e7d606472fb77e0c6
Author: imbajin <[email protected]>
AuthorDate: Mon Dec 11 22:21:08 2023 +0800

    refact(rocksdb): clean & reformat some code (#2200)
    
    
    * chore: merge master to clean-rocksdb for synchronization (#2383)
    
    ---------
    
    Co-authored-by: V_Galaxy <[email protected]>
---
 .../hugegraph/backend/store/BackendTable.java      |  14 +-
 .../backend/store/rocksdb/OpenedRocksDB.java       |  20 +-
 .../backend/store/rocksdb/RocksDBIngester.java     |  14 +-
 .../backend/store/rocksdb/RocksDBIteratorPool.java |  20 +-
 .../backend/store/rocksdb/RocksDBMetrics.java      |  79 +++----
 .../backend/store/rocksdb/RocksDBOptions.java      |  11 +-
 .../backend/store/rocksdb/RocksDBSessions.java     |  25 +--
 .../backend/store/rocksdb/RocksDBStdSessions.java  | 245 ++++++++-------------
 .../backend/store/rocksdb/RocksDBStore.java        | 113 ++++------
 .../backend/store/rocksdb/RocksDBTable.java        |  21 +-
 .../backend/store/rocksdb/RocksDBTables.java       |  14 +-
 .../store/rocksdbsst/RocksDBSstSessions.java       |  52 ++---
 .../backend/store/rocksdbsst/RocksDBSstStore.java  |  33 +--
 .../unit/rocksdb/BaseRocksDBUnitTest.java          |  17 +-
 .../unit/rocksdb/RocksDBCountersTest.java          |   9 +-
 .../hugegraph/unit/rocksdb/RocksDBPerfTest.java    |  34 ++-
 .../hugegraph/unit/rocksdb/RocksDBSessionTest.java |  61 +++--
 .../unit/rocksdb/RocksDBSessionsTest.java          |  13 +-
 .../unit/serializer/BinaryBackendEntryTest.java    |   4 +-
 .../serializer/BinaryScatterSerializerTest.java    |   8 +-
 .../unit/serializer/BinarySerializerTest.java      |   5 +-
 21 files changed, 320 insertions(+), 492 deletions(-)

diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java
index 62d14782d..505739aef 100644
--- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java
+++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java
@@ -33,6 +33,7 @@ import org.apache.hugegraph.util.Bytes;
 import org.apache.hugegraph.util.E;
 import org.apache.hugegraph.util.NumericUtil;
 import org.apache.hugegraph.util.StringEncoding;
+
 import com.google.common.collect.ImmutableList;
 
 public abstract class BackendTable<Session extends BackendSession, Entry> {
@@ -91,7 +92,8 @@ public abstract class BackendTable<Session extends BackendSession, Entry> {
     }
 
     /**
-     *  Mapping query-type to table-type
+     * Mapping query-type to table-type
+     *
      * @param query origin query
      * @return corresponding table type
      */
@@ -231,12 +233,11 @@ public abstract class BackendTable<Session extends BackendSession, Entry> {
 
         public static class Range {
 
-            private byte[] startKey;
-            private byte[] endKey;
+            private final byte[] startKey;
+            private final byte[] endKey;
 
             public Range(byte[] startKey, byte[] endKey) {
-                this.startKey = Arrays.equals(EMPTY, startKey) ?
-                                START_BYTES : startKey;
+                this.startKey = Arrays.equals(EMPTY, startKey) ? START_BYTES : startKey;
                 this.endKey = Arrays.equals(EMPTY, endKey) ? END_BYTES : endKey;
             }
 
@@ -361,8 +362,7 @@ public abstract class BackendTable<Session extends BackendSession, Entry> {
             private static byte[] align(byte[] array, int length) {
                 int len = array.length;
                 E.checkArgument(len <= length,
-                                "The length of array '%s' exceed " +
-                                "align length '%s'", len, length);
+                                "The length of array '%s' exceed align length '%s'", len, length);
                 byte[] target = new byte[length];
                 System.arraycopy(array, 0, target, length - len, len);
                 return target;
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/OpenedRocksDB.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/OpenedRocksDB.java
index 91e02878a..c62ab1211 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/OpenedRocksDB.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/OpenedRocksDB.java
@@ -27,17 +27,16 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.backend.BackendException;
+import org.apache.hugegraph.backend.store.rocksdb.RocksDBIteratorPool.ReusedRocksIterator;
+import org.apache.hugegraph.util.E;
+import org.apache.hugegraph.util.Log;
 import org.rocksdb.Checkpoint;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.RocksDB;
 import org.rocksdb.SstFileManager;
 import org.slf4j.Logger;
 
-import org.apache.hugegraph.backend.BackendException;
-import org.apache.hugegraph.backend.store.rocksdb.RocksDBIteratorPool.ReusedRocksIterator;
-import org.apache.hugegraph.util.E;
-import org.apache.hugegraph.util.Log;
-
 public class OpenedRocksDB implements AutoCloseable {
 
     private static final Logger LOG = Log.logger(OpenedRocksDB.class);
@@ -118,8 +117,7 @@ public class OpenedRocksDB implements AutoCloseable {
                                                     tempFile, snapshotFile));
             }
         } catch (Exception e) {
-            throw new BackendException("Failed to create checkpoint at path %s",
-                                       e, targetPath);
+            throw new BackendException("Failed to create checkpoint at path %s", e, targetPath);
         }
     }
 
@@ -137,8 +135,7 @@ public class OpenedRocksDB implements AutoCloseable {
         }
 
         public synchronized ColumnFamilyHandle get() {
-            E.checkState(this.handle.isOwningHandle(),
-                         "It seems CF has been closed");
+            E.checkState(this.handle.isOwningHandle(), "It seems CF has been closed");
             assert this.refs.get() >= 1;
             return this.handle;
         }
@@ -163,7 +160,7 @@ public class OpenedRocksDB implements AutoCloseable {
 
         public synchronized ColumnFamilyHandle waitForDrop() {
             assert this.refs.get() >= 1;
-            // When entering this method, the refs won't increase any more
+            // When entering this method, the refs won't increase anymore
             final long timeout = TimeUnit.MINUTES.toMillis(30L);
             final long unit = 100L;
             for (long i = 1; this.refs.get() > 1; i++) {
@@ -173,8 +170,7 @@ public class OpenedRocksDB implements AutoCloseable {
                     // 30s rest api timeout may cause InterruptedException
                 }
                 if (i * unit > timeout) {
-                    throw new BackendException("Timeout after %sms to drop CF",
-                                               timeout);
+                    throw new BackendException("Timeout after %sms to drop CF", timeout);
                 }
             }
             assert this.refs.get() == 1;
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIngester.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIngester.java
index ab89e19ef..fa30a389b 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIngester.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIngester.java
@@ -27,15 +27,14 @@ import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hugegraph.backend.BackendException;
+import org.apache.hugegraph.util.Log;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.IngestExternalFileOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.slf4j.Logger;
 
-import org.apache.hugegraph.backend.BackendException;
-import org.apache.hugegraph.util.Log;
-
 public class RocksDBIngester {
 
     public static final String SST = ".sst";
@@ -52,8 +51,7 @@ public class RocksDBIngester {
         this.options.setMoveFiles(true);
     }
 
-    public List<String> ingest(Path path, ColumnFamilyHandle cf)
-                               throws RocksDBException {
+    public List<String> ingest(Path path, ColumnFamilyHandle cf) throws RocksDBException {
         SuffixFileVisitor visitor = new SuffixFileVisitor(SST);
         try {
             Files.walkFileTree(path, visitor);
@@ -74,10 +72,8 @@ public class RocksDBIngester {
         return ssts;
     }
 
-    public void ingest(ColumnFamilyHandle cf, List<String> ssts)
-                       throws RocksDBException {
-        LOG.info("Ingest sst files to CF '{}': {}",
-                 RocksDBStdSessions.decode(cf.getName()), ssts);
+    public void ingest(ColumnFamilyHandle cf, List<String> ssts) throws RocksDBException {
+        LOG.info("Ingest sst files to CF '{}': {}", RocksDBStdSessions.decode(cf.getName()), ssts);
         if (!ssts.isEmpty()) {
             this.rocksdb.ingestExternalFile(cf, ssts, this.options);
         }
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIteratorPool.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIteratorPool.java
index 7aad1407e..b4c6d3e2c 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIteratorPool.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBIteratorPool.java
@@ -20,17 +20,16 @@ package org.apache.hugegraph.backend.store.rocksdb;
 import java.util.Queue;
 import java.util.concurrent.ArrayBlockingQueue;
 
+import org.apache.hugegraph.backend.BackendException;
+import org.apache.hugegraph.config.CoreOptions;
+import org.apache.hugegraph.util.Log;
+import org.apache.hugegraph.util.StringEncoding;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.RocksIterator;
 import org.slf4j.Logger;
 
-import org.apache.hugegraph.backend.BackendException;
-import org.apache.hugegraph.config.CoreOptions;
-import org.apache.hugegraph.util.Log;
-import org.apache.hugegraph.util.StringEncoding;
-
 public final class RocksDBIteratorPool implements AutoCloseable {
 
     private static final Logger LOG = Log.logger(RocksDBIteratorPool.class);
@@ -63,9 +62,8 @@ public final class RocksDBIteratorPool implements AutoCloseable {
 
     @Override
     public void close() {
-        LOG.debug("Close IteratorPool with pool size {} ({})",
-                  this.pool.size(), this);
-        for (RocksIterator iter; (iter = this.pool.poll()) != null;) {
+        LOG.debug("Close IteratorPool with pool size {} ({})", this.pool.size(), this);
+        for (RocksIterator iter; (iter = this.pool.poll()) != null; ) {
             this.closeIterator(iter);
         }
         assert this.pool.isEmpty();
@@ -149,13 +147,13 @@ public final class RocksDBIteratorPool implements AutoCloseable {
 
     protected final class ReusedRocksIterator {
 
-        private static final boolean EREUSING_ENABLED = false;
+        private static final boolean REUSING_ENABLED = false;
         private final RocksIterator iterator;
         private boolean closed;
 
         public ReusedRocksIterator() {
             this.closed = false;
-            if (EREUSING_ENABLED) {
+            if (REUSING_ENABLED) {
                 this.iterator = allocIterator();
             } else {
                 this.iterator = createIterator();
@@ -173,7 +171,7 @@ public final class RocksDBIteratorPool implements AutoCloseable {
             }
             this.closed = true;
 
-            if (EREUSING_ENABLED) {
+            if (REUSING_ENABLED) {
                 releaseIterator(this.iterator);
             } else {
                 closeIterator(this.iterator);
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBMetrics.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBMetrics.java
index 61462d6f8..6547eaf76 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBMetrics.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBMetrics.java
@@ -24,6 +24,7 @@ import org.apache.hugegraph.backend.store.BackendMetrics;
 import org.apache.hugegraph.util.Bytes;
 import org.apache.hugegraph.util.InsertionOrderUtil;
 import org.apache.hugegraph.util.UnitUtil;
+
 import com.google.common.collect.ImmutableMap;
 
 public class RocksDBMetrics implements BackendMetrics {
@@ -32,61 +33,37 @@ public class RocksDBMetrics implements BackendMetrics {
     private static final String PREFIX = "rocksdb.";
 
     // memory
-    private static final String BLOCK_CACHE = PREFIX +
-                                "block-cache-usage";
-    private static final String BLOCK_CACHE_PINNED = PREFIX +
-                                "block-cache-pinned-usage";
-    private static final String BLOCK_CACHE_CAPACITY = PREFIX +
-                                "block-cache-capacity";
-    private static final String INDEX_FILTER = PREFIX +
-                                "estimate-table-readers-mem";
-    private static final String ALL_MEM_TABLE = PREFIX +
-                                "size-all-mem-tables";
-    private static final String CUR_MEM_TABLE = PREFIX +
-                                "cur-size-all-mem-tables";
+    private static final String BLOCK_CACHE = PREFIX + "block-cache-usage";
+    private static final String BLOCK_CACHE_PINNED = PREFIX + "block-cache-pinned-usage";
+    private static final String BLOCK_CACHE_CAPACITY = PREFIX + "block-cache-capacity";
+    private static final String INDEX_FILTER = PREFIX + "estimate-table-readers-mem";
+    private static final String ALL_MEM_TABLE = PREFIX + "size-all-mem-tables";
+    private static final String CUR_MEM_TABLE = PREFIX + "cur-size-all-mem-tables";
     // disk
-    private static final String DISK_USAGE = PREFIX +
-                                "disk-usage";
-    private static final String LIVE_DATA_SIZE = PREFIX +
-                                "estimate-live-data-size";
-    private static final String SST_FILE_SIZE = PREFIX +
-                                "total-sst-files-size";
-    private static final String LIVE_SST_FILE_SIZE = PREFIX +
-                                "live-sst-files-size";
+    private static final String DISK_USAGE = PREFIX + "disk-usage";
+    private static final String LIVE_DATA_SIZE = PREFIX + "estimate-live-data-size";
+    private static final String SST_FILE_SIZE = PREFIX + "total-sst-files-size";
+    private static final String LIVE_SST_FILE_SIZE = PREFIX + "live-sst-files-size";
     private static final String PENDING_COMPACTION_BYTES = PREFIX +
-                                "estimate-pending-compaction-bytes";
+                                                           "estimate-pending-compaction-bytes";
 
     // count/number
-    private static final String NUM_KEYS = PREFIX +
-                                "estimate-num-keys";
-    private static final String NUM_KEYS_MEM_TABLE = PREFIX +
-                                "num-entries-active-mem-table";
-    private static final String NUM_KEYS_IMM_MEM_TABLE = PREFIX +
-                                "num-entries-imm-mem-tables";
-    private static final String NUM_DELETES_MEM_TABLE = PREFIX +
-                                "num-deletes-active-mem-table";
-    private static final String NUM_DELETES_IMM_MEM_TABLE = PREFIX +
-                                "num-deletes-imm-mem-tables";
-
-    private static final String RUNNING_FLUSHS = PREFIX +
-                                "num-running-flushes";
-    private static final String MEM_TABLE_FLUSH_PENDINF = PREFIX +
-                                "mem-table-flush-pending";
-    private static final String RUNNING_COMPACTIONS = PREFIX +
-                                "num-running-compactions";
-    private static final String COMPACTION_PENDINF = PREFIX +
-                                "compaction-pending";
-
-    private static final String NUM_IMM_MEM_TABLE = PREFIX +
-                                "num-immutable-mem-table";
-    private static final String NUM_SNAPSHOTS = PREFIX +
-                                "num-snapshots";
-    private static final String OLDEST_SNAPSHOT_TIME = PREFIX +
-                                "oldest-snapshot-time";
-    private static final String NUM_LIVE_VERSIONS = PREFIX +
-                                "num-live-versions";
-    private static final String SUPER_VERSION = PREFIX +
-                                "current-super-version-number";
+    private static final String NUM_KEYS = PREFIX + "estimate-num-keys";
+    private static final String NUM_KEYS_MEM_TABLE = PREFIX + "num-entries-active-mem-table";
+    private static final String NUM_KEYS_IMM_MEM_TABLE = PREFIX + "num-entries-imm-mem-tables";
+    private static final String NUM_DELETES_MEM_TABLE = PREFIX + "num-deletes-active-mem-table";
+    private static final String NUM_DELETES_IMM_MEM_TABLE = PREFIX + "num-deletes-imm-mem-tables";
+
+    private static final String RUNNING_FLUSHS = PREFIX + "num-running-flushes";
+    private static final String MEM_TABLE_FLUSH_PENDINF = PREFIX + "mem-table-flush-pending";
+    private static final String RUNNING_COMPACTIONS = PREFIX + "num-running-compactions";
+    private static final String COMPACTION_PENDINF = PREFIX + "compaction-pending";
+
+    private static final String NUM_IMM_MEM_TABLE = PREFIX + "num-immutable-mem-table";
+    private static final String NUM_SNAPSHOTS = PREFIX + "num-snapshots";
+    private static final String OLDEST_SNAPSHOT_TIME = PREFIX + "oldest-snapshot-time";
+    private static final String NUM_LIVE_VERSIONS = PREFIX + "num-live-versions";
+    private static final String SUPER_VERSION = PREFIX + "current-super-version-number";
 
     public static final String KEY_DISK_USAGE = DISK_USAGE;
     public static final String KEY_NUM_KEYS = NUM_KEYS;
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBOptions.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBOptions.java
index a696b6cc3..cb0b74a5d 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBOptions.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBOptions.java
@@ -23,17 +23,17 @@ import static org.apache.hugegraph.config.OptionChecker.inValues;
 import static org.apache.hugegraph.config.OptionChecker.rangeDouble;
 import static org.apache.hugegraph.config.OptionChecker.rangeInt;
 
-import org.rocksdb.CompactionStyle;
-import org.rocksdb.CompressionType;
-import org.rocksdb.DataBlockIndexType;
-import org.rocksdb.IndexType;
-
 import org.apache.hugegraph.config.ConfigConvOption;
 import org.apache.hugegraph.config.ConfigListConvOption;
 import org.apache.hugegraph.config.ConfigListOption;
 import org.apache.hugegraph.config.ConfigOption;
 import org.apache.hugegraph.config.OptionHolder;
 import org.apache.hugegraph.util.Bytes;
+import org.rocksdb.CompactionStyle;
+import org.rocksdb.CompressionType;
+import org.rocksdb.DataBlockIndexType;
+import org.rocksdb.IndexType;
+
 import com.google.common.collect.ImmutableList;
 
 public class RocksDBOptions extends OptionHolder {
@@ -52,6 +52,7 @@ public class RocksDBOptions extends OptionHolder {
         return instance;
     }
 
+    // TODO: the entire align style is wrong, change it to 4 space later
     public static final ConfigOption<String> DATA_PATH =
             new ConfigOption<>(
                     "rocksdb.data_path",
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBSessions.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBSessions.java
index 8614d6b73..474f55db8 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBSessions.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBSessions.java
@@ -21,12 +21,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.lang3.tuple.Pair;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
 import org.apache.hugegraph.backend.store.BackendSession.AbstractBackendSession;
 import org.apache.hugegraph.backend.store.BackendSessionPool;
 import org.apache.hugegraph.config.HugeConfig;
+import org.rocksdb.RocksDBException;
 
 public abstract class RocksDBSessions extends BackendSessionPool {
 
@@ -46,8 +45,7 @@ public abstract class RocksDBSessions extends BackendSessionPool {
 
     public abstract void compactRange();
 
-    public abstract RocksDBSessions copy(HugeConfig config,
-                                         String database, String store);
+    public abstract RocksDBSessions copy(HugeConfig config, String database, String store);
 
     public abstract void createSnapshot(String snapshotPath);
 
@@ -55,8 +53,7 @@ public abstract class RocksDBSessions extends BackendSessionPool {
 
     public abstract String buildSnapshotPath(String snapshotPrefix);
 
-    public abstract String hardLinkSnapshot(String snapshotPath)
-                                            throws RocksDBException;
+    public abstract String hardLinkSnapshot(String snapshotPath) throws RocksDBException;
 
     public abstract void reloadRocksDB() throws RocksDBException;
 
@@ -105,22 +102,16 @@ public abstract class RocksDBSessions extends BackendSessionPool {
 
         public abstract byte[] get(String table, byte[] key);
 
-        public abstract BackendColumnIterator get(String table,
-                                                  List<byte[]> keys);
+        public abstract BackendColumnIterator get(String table, List<byte[]> keys);
 
         public abstract BackendColumnIterator scan(String table);
 
-        public abstract BackendColumnIterator scan(String table,
-                                                   byte[] prefix);
+        public abstract BackendColumnIterator scan(String table, byte[] prefix);
 
-        public abstract BackendColumnIterator scan(String table,
-                                                   byte[] keyFrom,
-                                                   byte[] keyTo,
-                                                   int scanType);
+        public abstract BackendColumnIterator scan(String table, byte[] keyFrom,
+                                                   byte[] keyTo, int scanType);
 
-        public BackendColumnIterator scan(String table,
-                                          byte[] keyFrom,
-                                          byte[] keyTo) {
+        public BackendColumnIterator scan(String table, byte[] keyFrom, byte[] keyTo) {
             return this.scan(table, keyFrom, keyTo, SCAN_LT_END);
         }
 
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java
index bcbe37b7c..15f904d6e 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStdSessions.java
@@ -32,6 +32,18 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hugegraph.backend.BackendException;
+import org.apache.hugegraph.backend.serializer.BinarySerializer;
+import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn;
+import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
+import org.apache.hugegraph.backend.store.BackendEntryIterator;
+import org.apache.hugegraph.backend.store.rocksdb.RocksDBIteratorPool.ReusedRocksIterator;
+import org.apache.hugegraph.config.CoreOptions;
+import org.apache.hugegraph.config.HugeConfig;
+import org.apache.hugegraph.util.Bytes;
+import org.apache.hugegraph.util.E;
+import org.apache.hugegraph.util.Log;
+import org.apache.hugegraph.util.StringEncoding;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.BloomFilter;
 import org.rocksdb.ColumnFamilyDescriptor;
@@ -57,18 +69,6 @@ import org.rocksdb.WriteBatch;
 import org.rocksdb.WriteOptions;
 import org.slf4j.Logger;
 
-import org.apache.hugegraph.backend.BackendException;
-import org.apache.hugegraph.backend.serializer.BinarySerializer;
-import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn;
-import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
-import org.apache.hugegraph.backend.store.BackendEntryIterator;
-import org.apache.hugegraph.backend.store.rocksdb.RocksDBIteratorPool.ReusedRocksIterator;
-import org.apache.hugegraph.config.CoreOptions;
-import org.apache.hugegraph.config.HugeConfig;
-import org.apache.hugegraph.util.Bytes;
-import org.apache.hugegraph.util.E;
-import org.apache.hugegraph.util.Log;
-import org.apache.hugegraph.util.StringEncoding;
 import com.google.common.collect.ImmutableList;
 
 public class RocksDBStdSessions extends RocksDBSessions {
@@ -83,14 +83,12 @@ public class RocksDBStdSessions extends RocksDBSessions {
     private final AtomicInteger refCount;
 
     public RocksDBStdSessions(HugeConfig config, String database, String store,
-                              String dataPath, String walPath)
-                              throws RocksDBException {
+                              String dataPath, String walPath) throws RocksDBException {
         super(config, database, store);
         this.config = config;
         this.dataPath = dataPath;
         this.walPath = walPath;
-        this.rocksdb = RocksDBStdSessions.openRocksDB(config, dataPath,
-                                                      walPath);
+        this.rocksdb = RocksDBStdSessions.openRocksDB(config, dataPath, walPath);
         this.refCount = new AtomicInteger(1);
     }
 
@@ -101,8 +99,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
         this.config = config;
         this.dataPath = dataPath;
         this.walPath = walPath;
-        this.rocksdb = RocksDBStdSessions.openRocksDB(config, cfNames,
-                                                      dataPath, walPath);
+        this.rocksdb = RocksDBStdSessions.openRocksDB(config, cfNames, dataPath, walPath);
         this.refCount = new AtomicInteger(1);
 
         this.ingestExternalFile();
@@ -166,8 +163,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
     }
 
     @Override
-    public synchronized void dropTable(String... tables)
-                                       throws RocksDBException {
+    public synchronized void dropTable(String... tables) throws RocksDBException {
         this.checkValid();
 
         /*
@@ -210,10 +206,8 @@ public class RocksDBStdSessions extends RocksDBSessions {
         if (this.rocksdb.isOwningHandle()) {
             this.rocksdb.close();
         }
-        this.rocksdb = RocksDBStdSessions.openRocksDB(this.config,
-                                                      ImmutableList.of(),
-                                                      this.dataPath,
-                                                      this.walPath);
+        this.rocksdb = RocksDBStdSessions.openRocksDB(this.config, ImmutableList.of(),
+                                                      this.dataPath, this.walPath);
     }
 
     @Override
@@ -252,8 +246,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
     }
 
     @Override
-    public RocksDBSessions copy(HugeConfig config,
-                                String database, String store) {
+    public RocksDBSessions copy(HugeConfig config, String database, String store) {
         return new RocksDBStdSessions(config, database, store, this);
     }
 
@@ -281,8 +274,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
             }
             // Move snapshot directory to origin data directory
             FileUtils.moveDirectory(snapshotDir, originDataDir);
-            LOG.info("Move snapshot directory {} to {}",
-                     snapshotDir, originDataDir);
+            LOG.info("Move snapshot directory {} to {}", snapshotDir, originDataDir);
             // Reload rocksdb instance
             this.reloadRocksDB();
         } catch (Exception e) {
@@ -299,24 +291,20 @@ public class RocksDBStdSessions extends RocksDBSessions {
         // Like: rocksdb-data/*
         Path pureDataPath = parentParentPath.relativize(originDataPath.toAbsolutePath());
         // Like: parent_path/snapshot_rocksdb-data/*
-        Path snapshotPath = parentParentPath.resolve(snapshotPrefix + "_" +
-                                                     pureDataPath);
+        Path snapshotPath = parentParentPath.resolve(snapshotPrefix + "_" + pureDataPath);
         E.checkArgument(snapshotPath.toFile().exists(),
-                        "The snapshot path '%s' doesn't exist",
-                        snapshotPath);
+                        "The snapshot path '%s' doesn't exist", snapshotPath);
         return snapshotPath.toString();
     }
 
     @Override
     public String hardLinkSnapshot(String snapshotPath) throws RocksDBException {
         String snapshotLinkPath = this.dataPath + "_temp";
-        try (OpenedRocksDB rocksdb = openRocksDB(this.config,
-                                                 ImmutableList.of(),
+        try (OpenedRocksDB rocksdb = openRocksDB(this.config, ImmutableList.of(),
                                                  snapshotPath, null)) {
             rocksdb.createCheckpoint(snapshotLinkPath);
         }
-        LOG.info("The snapshot {} has been hard linked to {}",
-                 snapshotPath, snapshotLinkPath);
+        LOG.info("The snapshot {} has been hard linked to {}", snapshotPath, 
snapshotLinkPath);
         return snapshotLinkPath;
     }
 
@@ -327,8 +315,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
 
     @Override
     protected final Session newSession() {
-        E.checkState(this.rocksdb.isOwningHandle(),
-                     "RocksDB has not been initialized");
+        E.checkState(this.rocksdb.isOwningHandle(), "RocksDB has not been 
initialized");
         return new StdSession(this.config());
     }
 
@@ -344,8 +331,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
     }
 
     private void checkValid() {
-        E.checkState(this.rocksdb.isOwningHandle(),
-                     "It seems RocksDB has been closed");
+        E.checkState(this.rocksdb.isOwningHandle(), "It seems RocksDB has been 
closed");
     }
 
     private RocksDB rocksdb() {
@@ -379,13 +365,11 @@ public class RocksDBStdSessions extends RocksDBSessions {
         }
     }
 
-    private static OpenedRocksDB openRocksDB(HugeConfig config,
-                                             String dataPath, String walPath)
-                                             throws RocksDBException {
+    private static OpenedRocksDB openRocksDB(HugeConfig config, String dataPath,
+                                             String walPath) throws RocksDBException {
         // Init options
         Options options = new Options();
-        RocksDBStdSessions.initOptions(config, options, options,
-                                       options, options);
+        RocksDBStdSessions.initOptions(config, options, options, options, options);
         options.setWalDir(walPath);
         SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
         options.setSstFileManager(sstFileManager);
@@ -399,9 +383,8 @@ public class RocksDBStdSessions extends RocksDBSessions {
     }
 
     private static OpenedRocksDB openRocksDB(HugeConfig config,
-                                             List<String> cfNames,
-                                             String dataPath, String walPath)
-                                             throws RocksDBException {
+                                             List<String> cfNames, String dataPath,
+                                             String walPath) throws RocksDBException {
         // Old CFs should always be opened
         Set<String> mergedCFs = RocksDBStdSessions.mergeOldCFs(dataPath,
                                                                cfNames);
@@ -412,8 +395,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
         for (String cf : cfs) {
             ColumnFamilyDescriptor cfd = new ColumnFamilyDescriptor(encode(cf));
             ColumnFamilyOptions options = cfd.getOptions();
-            RocksDBStdSessions.initOptions(config, null, null,
-                                           options, options);
+            RocksDBStdSessions.initOptions(config, null, null, options, options);
             cfds.add(cfd);
         }
 
@@ -440,8 +422,8 @@ public class RocksDBStdSessions extends RocksDBSessions {
         return new OpenedRocksDB(rocksdb, cfHandles, sstFileManager);
     }
 
-    private static Set<String> mergeOldCFs(String path, List<String> cfNames)
-                                           throws RocksDBException {
+    private static Set<String> mergeOldCFs(String path,
+                                           List<String> cfNames) throws RocksDBException {
         Set<String> cfs = listCFs(path);
         cfs.addAll(cfNames);
         return cfs;
@@ -486,35 +468,28 @@ public class RocksDBStdSessions extends RocksDBSessions {
                 db.setEnableWriteThreadAdaptiveYield(true);
             }
             db.setInfoLogLevel(InfoLogLevel.valueOf(
-                    conf.get(RocksDBOptions.LOG_LEVEL) + "_LEVEL"));
+                conf.get(RocksDBOptions.LOG_LEVEL) + "_LEVEL"));
 
-            db.setMaxSubcompactions(
-                    conf.get(RocksDBOptions.MAX_SUB_COMPACTIONS));
+            db.setMaxSubcompactions(conf.get(RocksDBOptions.MAX_SUB_COMPACTIONS));
 
-            db.setAllowMmapWrites(
-                    conf.get(RocksDBOptions.ALLOW_MMAP_WRITES));
-            db.setAllowMmapReads(
-                    conf.get(RocksDBOptions.ALLOW_MMAP_READS));
+            db.setAllowMmapWrites(conf.get(RocksDBOptions.ALLOW_MMAP_WRITES));
+            db.setAllowMmapReads(conf.get(RocksDBOptions.ALLOW_MMAP_READS));
 
-            db.setUseDirectReads(
-                    conf.get(RocksDBOptions.USE_DIRECT_READS));
+            db.setUseDirectReads(conf.get(RocksDBOptions.USE_DIRECT_READS));
             db.setUseDirectIoForFlushAndCompaction(
-                    conf.get(RocksDBOptions.USE_DIRECT_READS_WRITES_FC));
+                conf.get(RocksDBOptions.USE_DIRECT_READS_WRITES_FC));
 
             db.setUseFsync(conf.get(RocksDBOptions.USE_FSYNC));
 
             db.setAtomicFlush(conf.get(RocksDBOptions.ATOMIC_FLUSH));
 
-            db.setMaxManifestFileSize(
-                    conf.get(RocksDBOptions.MAX_MANIFEST_FILE_SIZE));
+            db.setMaxManifestFileSize(conf.get(RocksDBOptions.MAX_MANIFEST_FILE_SIZE));
 
-            db.setSkipStatsUpdateOnDbOpen(
-                    conf.get(RocksDBOptions.SKIP_STATS_UPDATE_ON_DB_OPEN));
+            db.setSkipStatsUpdateOnDbOpen(conf.get(RocksDBOptions.SKIP_STATS_UPDATE_ON_DB_OPEN));
             db.setSkipCheckingSstFileSizesOnDbOpen(
-                    conf.get(RocksDBOptions.SKIP_CHECK_SIZE_ON_DB_OPEN));
+                conf.get(RocksDBOptions.SKIP_CHECK_SIZE_ON_DB_OPEN));
 
-            db.setMaxFileOpeningThreads(
-                    conf.get(RocksDBOptions.MAX_FILE_OPENING_THREADS));
+            db.setMaxFileOpeningThreads(conf.get(RocksDBOptions.MAX_FILE_OPENING_THREADS));
 
             db.setDbWriteBufferSize(conf.get(RocksDBOptions.DB_MEMTABLE_SIZE));
 
@@ -535,8 +510,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
              */
             mdb.setMaxBackgroundJobs(conf.get(RocksDBOptions.MAX_BG_JOBS));
 
-            mdb.setDelayedWriteRate(
-                    conf.get(RocksDBOptions.DELAYED_WRITE_RATE));
+            mdb.setDelayedWriteRate(conf.get(RocksDBOptions.DELAYED_WRITE_RATE));
 
             mdb.setMaxOpenFiles(conf.get(RocksDBOptions.MAX_OPEN_FILES));
 
@@ -544,14 +518,12 @@ public class RocksDBStdSessions extends RocksDBSessions {
 
             mdb.setBytesPerSync(conf.get(RocksDBOptions.BYTES_PER_SYNC));
             mdb.setWalBytesPerSync(conf.get(RocksDBOptions.WAL_BYTES_PER_SYNC));
-            mdb.setStrictBytesPerSync(
-                    conf.get(RocksDBOptions.STRICT_BYTES_PER_SYNC));
+            mdb.setStrictBytesPerSync(conf.get(RocksDBOptions.STRICT_BYTES_PER_SYNC));
 
-            mdb.setCompactionReadaheadSize(
-                    conf.get(RocksDBOptions.COMPACTION_READAHEAD_SIZE));
+            mdb.setCompactionReadaheadSize(conf.get(RocksDBOptions.COMPACTION_READAHEAD_SIZE));
 
-            mdb.setDeleteObsoleteFilesPeriodMicros(1000000 *
-                    conf.get(RocksDBOptions.DELETE_OBSOLETE_FILE_PERIOD));
+            mdb.setDeleteObsoleteFilesPeriodMicros(
+                1000000 * conf.get(RocksDBOptions.DELETE_OBSOLETE_FILE_PERIOD));
         }
 
         if (cf != null) {
@@ -562,38 +534,30 @@ public class RocksDBStdSessions extends RocksDBSessions {
             }
 
             int numLevels = conf.get(RocksDBOptions.NUM_LEVELS);
-            List<CompressionType> compressions = conf.get(
-                                  RocksDBOptions.LEVELS_COMPRESSIONS);
-            E.checkArgument(compressions.isEmpty() ||
-                            compressions.size() == numLevels,
+            List<CompressionType> compressions = conf.get(RocksDBOptions.LEVELS_COMPRESSIONS);
+            E.checkArgument(compressions.isEmpty() || compressions.size() == numLevels,
                             "Elements number of '%s' must be 0 or " +
                             "be the same as '%s', but got %s != %s",
                             RocksDBOptions.LEVELS_COMPRESSIONS.name(),
-                            RocksDBOptions.NUM_LEVELS.name(),
-                            compressions.size(), numLevels);
+                            RocksDBOptions.NUM_LEVELS.name(), compressions.size(), numLevels);
 
             cf.setNumLevels(numLevels);
             cf.setCompactionStyle(conf.get(RocksDBOptions.COMPACTION_STYLE));
 
-            cf.setBottommostCompressionType(
-                    conf.get(RocksDBOptions.BOTTOMMOST_COMPRESSION));
+            cf.setBottommostCompressionType(conf.get(RocksDBOptions.BOTTOMMOST_COMPRESSION));
             if (!compressions.isEmpty()) {
                 cf.setCompressionPerLevel(compressions);
             }
 
-            cf.setMinWriteBufferNumberToMerge(
-                    conf.get(RocksDBOptions.MIN_MEMTABLES_TO_MERGE));
+            cf.setMinWriteBufferNumberToMerge(conf.get(RocksDBOptions.MIN_MEMTABLES_TO_MERGE));
             cf.setMaxWriteBufferNumberToMaintain(
-                    conf.get(RocksDBOptions.MAX_MEMTABLES_TO_MAINTAIN));
+                conf.get(RocksDBOptions.MAX_MEMTABLES_TO_MAINTAIN));
 
-            cf.setInplaceUpdateSupport(
-                    conf.get(RocksDBOptions.MEMTABLE_INPLACE_UPDATE_SUPPORT));
+            cf.setInplaceUpdateSupport(conf.get(RocksDBOptions.MEMTABLE_INPLACE_UPDATE_SUPPORT));
 
-            cf.setLevelCompactionDynamicLevelBytes(
-                    conf.get(RocksDBOptions.DYNAMIC_LEVEL_BYTES));
+            cf.setLevelCompactionDynamicLevelBytes(conf.get(RocksDBOptions.DYNAMIC_LEVEL_BYTES));
 
-            cf.setOptimizeFiltersForHits(
-                    conf.get(RocksDBOptions.BLOOM_FILTERS_SKIP_LAST_LEVEL));
+            cf.setOptimizeFiltersForHits(conf.get(RocksDBOptions.BLOOM_FILTERS_SKIP_LAST_LEVEL));
 
             cf.setTableFormatConfig(initTableConfig(conf));
 
@@ -613,27 +577,22 @@ public class RocksDBStdSessions extends RocksDBSessions {
             mcf.setWriteBufferSize(conf.get(RocksDBOptions.MEMTABLE_SIZE));
             mcf.setMaxWriteBufferNumber(conf.get(RocksDBOptions.MAX_MEMTABLES));
 
-            mcf.setMaxBytesForLevelBase(
-                    conf.get(RocksDBOptions.MAX_LEVEL1_BYTES));
-            mcf.setMaxBytesForLevelMultiplier(
-                    conf.get(RocksDBOptions.MAX_LEVEL_BYTES_MULTIPLIER));
+            mcf.setMaxBytesForLevelBase(conf.get(RocksDBOptions.MAX_LEVEL1_BYTES));
+            mcf.setMaxBytesForLevelMultiplier(conf.get(RocksDBOptions.MAX_LEVEL_BYTES_MULTIPLIER));
 
-            mcf.setTargetFileSizeBase(
-                    conf.get(RocksDBOptions.TARGET_FILE_SIZE_BASE));
-            mcf.setTargetFileSizeMultiplier(
-                    conf.get(RocksDBOptions.TARGET_FILE_SIZE_MULTIPLIER));
+            mcf.setTargetFileSizeBase(conf.get(RocksDBOptions.TARGET_FILE_SIZE_BASE));
+            mcf.setTargetFileSizeMultiplier(conf.get(RocksDBOptions.TARGET_FILE_SIZE_MULTIPLIER));
 
             mcf.setLevel0FileNumCompactionTrigger(
-                    conf.get(RocksDBOptions.LEVEL0_COMPACTION_TRIGGER));
+                conf.get(RocksDBOptions.LEVEL0_COMPACTION_TRIGGER));
             mcf.setLevel0SlowdownWritesTrigger(
-                    conf.get(RocksDBOptions.LEVEL0_SLOWDOWN_WRITES_TRIGGER));
-            mcf.setLevel0StopWritesTrigger(
-                    conf.get(RocksDBOptions.LEVEL0_STOP_WRITES_TRIGGER));
+                conf.get(RocksDBOptions.LEVEL0_SLOWDOWN_WRITES_TRIGGER));
+            mcf.setLevel0StopWritesTrigger(conf.get(RocksDBOptions.LEVEL0_STOP_WRITES_TRIGGER));
 
             mcf.setSoftPendingCompactionBytesLimit(
-                    conf.get(RocksDBOptions.SOFT_PENDING_COMPACTION_LIMIT));
+                conf.get(RocksDBOptions.SOFT_PENDING_COMPACTION_LIMIT));
             mcf.setHardPendingCompactionBytesLimit(
-                    conf.get(RocksDBOptions.HARD_PENDING_COMPACTION_LIMIT));
+                conf.get(RocksDBOptions.HARD_PENDING_COMPACTION_LIMIT));
 
             /*
              * TODO: also set memtable options:
@@ -643,11 +602,10 @@ public class RocksDBStdSessions extends RocksDBSessions {
              * #diff-cde52d1fcbcce2bc6aae27838f1d3e7e9e469ccad8aaf8f2695f939e279d7501R369
              */
             mcf.setMemtablePrefixBloomSizeRatio(
-                    conf.get(RocksDBOptions.MEMTABLE_BLOOM_SIZE_RATIO));
+                conf.get(RocksDBOptions.MEMTABLE_BLOOM_SIZE_RATIO));
             mcf.setMemtableWholeKeyFiltering(
-                    conf.get(RocksDBOptions.MEMTABLE_BLOOM_WHOLE_KEY_FILTERING));
-            mcf.setMemtableHugePageSize(
-                    conf.get(RocksDBOptions.MEMTABL_BLOOM_HUGE_PAGE_SIZE));
+                conf.get(RocksDBOptions.MEMTABLE_BLOOM_WHOLE_KEY_FILTERING));
+            mcf.setMemtableHugePageSize(conf.get(RocksDBOptions.MEMTABL_BLOOM_HUGE_PAGE_SIZE));
 
             boolean bulkload = conf.get(RocksDBOptions.BULKLOAD_MODE);
             if (bulkload) {
@@ -671,8 +629,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
     public static TableFormatConfig initTableConfig(HugeConfig conf) {
         BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
 
-        tableConfig.setFormatVersion(
-                conf.get(RocksDBOptions.TABLE_FORMAT_VERSION));
+        tableConfig.setFormatVersion(conf.get(RocksDBOptions.TABLE_FORMAT_VERSION));
 
         /*
          * The index type used to lookup between data blocks:
@@ -689,17 +646,14 @@ public class RocksDBStdSessions extends RocksDBSessions {
          * The search type of point lookup can be BinarySearch or HashSearch:
          * https://github.com/facebook/rocksdb/wiki/Data-Block-Hash-Index
          */
-        tableConfig.setDataBlockIndexType(
-                conf.get(RocksDBOptions.DATA_BLOCK_SEARCH_TYPE));
+        tableConfig.setDataBlockIndexType(conf.get(RocksDBOptions.DATA_BLOCK_SEARCH_TYPE));
         tableConfig.setDataBlockHashTableUtilRatio(
-                conf.get(RocksDBOptions.DATA_BLOCK_HASH_TABLE_RATIO));
+            conf.get(RocksDBOptions.DATA_BLOCK_HASH_TABLE_RATIO));
 
         long blockSize = conf.get(RocksDBOptions.BLOCK_SIZE);
         tableConfig.setBlockSize(blockSize);
-        tableConfig.setBlockSizeDeviation(
-                conf.get(RocksDBOptions.BLOCK_SIZE_DEVIATION));
-        tableConfig.setBlockRestartInterval(
-                conf.get(RocksDBOptions.BLOCK_RESTART_INTERVAL));
+        tableConfig.setBlockSizeDeviation(conf.get(RocksDBOptions.BLOCK_SIZE_DEVIATION));
+        tableConfig.setBlockRestartInterval(conf.get(RocksDBOptions.BLOCK_RESTART_INTERVAL));
 
         // https://github.com/facebook/rocksdb/wiki/Block-Cache
         long cacheCapacity = conf.get(RocksDBOptions.BLOCK_CACHE_CAPACITY);
@@ -715,16 +669,14 @@ public class RocksDBStdSessions extends RocksDBSessions {
         if (bitsPerKey >= 0) {
             // TODO: use space-saving RibbonFilterPolicy
             boolean blockBased = conf.get(RocksDBOptions.BLOOM_FILTER_MODE);
-            tableConfig.setFilterPolicy(new BloomFilter(bitsPerKey,
-                                                        blockBased));
+            tableConfig.setFilterPolicy(new BloomFilter(bitsPerKey, blockBased));
 
-            tableConfig.setWholeKeyFiltering(
-                    conf.get(RocksDBOptions.BLOOM_FILTER_WHOLE_KEY));
+            tableConfig.setWholeKeyFiltering(conf.get(RocksDBOptions.BLOOM_FILTER_WHOLE_KEY));
 
             tableConfig.setCacheIndexAndFilterBlocks(
-                    conf.get(RocksDBOptions.CACHE_FILTER_AND_INDEX));
+                conf.get(RocksDBOptions.CACHE_FILTER_AND_INDEX));
             tableConfig.setPinL0FilterAndIndexBlocksInCache(
-                    conf.get(RocksDBOptions.PIN_L0_INDEX_AND_FILTER));
+                conf.get(RocksDBOptions.PIN_L0_INDEX_AND_FILTER));
 
             // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
             if (conf.get(RocksDBOptions.PARTITION_FILTERS_INDEXES)) {
@@ -734,7 +686,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
                            .setMetadataBlockSize(blockSize)
                            .setCacheIndexAndFilterBlocksWithHighPriority(true);
                 tableConfig.setPinTopLevelIndexAndFilter(
-                            conf.get(RocksDBOptions.PIN_TOP_INDEX_AND_FILTER));
+                    conf.get(RocksDBOptions.PIN_TOP_INDEX_AND_FILTER));
             }
         }
 
@@ -898,7 +850,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
         /**
          * Merge a record to an existing key to a table
          * For more details about merge-operator:
-         *  https://github.com/facebook/rocksdb/wiki/merge-operator
+         *  <a href="https://github.com/facebook/rocksdb/wiki/merge-operator">...</a>
          */
         @Override
         public void merge(String table, byte[] key, byte[] value) {
@@ -950,8 +902,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
          * Delete a record by key(or prefix with key) from a table
          */
         @Override
-        public void deletePrefix(String table, byte[] key) {
-            byte[] keyFrom = key;
+        public void deletePrefix(String table, byte[] keyFrom) {
             byte[] keyTo = Arrays.copyOf(keyFrom, keyFrom.length);
             BinarySerializer.increaseOne(keyTo);
             try (OpenedRocksDB.CFHandle cf = cf(table)) {
@@ -1044,8 +995,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
              */
             try (OpenedRocksDB.CFHandle cf = cf(table)) {
                 ReusedRocksIterator iter = cf.newIterator();
-                return new ScanIterator(table, iter, prefix, null,
-                                        SCAN_PREFIX_BEGIN);
+                return new ScanIterator(table, iter, prefix, null, SCAN_PREFIX_BEGIN);
             }
         }
 
@@ -1076,8 +1026,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
     /**
      * A wrapper for RocksIterator that convert RocksDB results to std Iterator
      */
-    private static class ScanIterator implements BackendColumnIterator,
-                                                 Countable {
+    private static class ScanIterator implements BackendColumnIterator, Countable {
 
         private final String table;
         private final ReusedRocksIterator reusedIter;
@@ -1164,14 +1113,12 @@ public class RocksDBStdSessions extends RocksDBSessions {
         @SuppressWarnings("unused")
         private void dump() {
             this.seek();
-            LOG.info(">>>> scan from {}: {}{}",
-                      this.table,
-                      this.keyBegin == null ? "*" : StringEncoding.format(this.keyBegin),
-                      this.iter.isValid() ? "" : " - No data");
+            LOG.info(">>>> scan from {}: {}{}", this.table,
+                     this.keyBegin == null ? "*" : StringEncoding.format(this.keyBegin),
+                     this.iter.isValid() ? "" : " - No data");
             for (; this.iter.isValid(); this.iter.next()) {
-                LOG.info("{}={}",
-                          StringEncoding.format(this.iter.key()),
-                          StringEncoding.format(this.iter.value()));
+                LOG.info("{}={}", StringEncoding.format(this.iter.key()),
+                         StringEncoding.format(this.iter.value()));
             }
         }
 
@@ -1202,7 +1149,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
         }
 
         private void seek() {
-            if (this.keyBegin == null || this.keyBegin.length <= 0) {
+            if (this.keyBegin == null || this.keyBegin.length == 0) {
                 // Seek to the first if no `keyBegin`
                 this.iter.seekToFirst();
             } else {
@@ -1216,8 +1163,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
                 // Skip `keyBegin` if set SCAN_GT_BEGIN (key > 'xx')
                 if (this.match(Session.SCAN_GT_BEGIN) &&
                     !this.match(Session.SCAN_GTE_BEGIN)) {
-                    while (this.iter.isValid() &&
-                           Bytes.equals(this.iter.key(), this.keyBegin)) {
+                    while (this.iter.isValid() && Bytes.equals(this.iter.key(), this.keyBegin)) {
                         this.iter.next();
                     }
                 }
@@ -1254,10 +1200,8 @@ public class RocksDBStdSessions extends RocksDBSessions {
                     return Bytes.compare(key, this.keyEnd) < 0;
                 }
             } else {
-                assert this.match(Session.SCAN_ANY) ||
-                       this.match(Session.SCAN_GT_BEGIN) ||
-                       this.match(Session.SCAN_GTE_BEGIN) :
-                       "Unknow scan type";
+                assert this.match(Session.SCAN_ANY) || this.match(Session.SCAN_GT_BEGIN) ||
+                       this.match(Session.SCAN_GTE_BEGIN) : "Unknown scan type";
                 return true;
             }
         }
@@ -1270,8 +1214,7 @@ public class RocksDBStdSessions extends RocksDBSessions {
                 }
             }
 
-            BackendColumn col = BackendColumn.of(this.iter.key(),
-                                                 this.iter.value());
+            BackendColumn col = BackendColumn.of(this.iter.key(), this.iter.value());
             this.iter.next();
             this.matched = false;
 
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStore.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStore.java
index 283baa622..c9a27b770 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStore.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBStore.java
@@ -93,9 +93,10 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
     private static final String TABLE_GENERAL_KEY = "general";
     private static final String DB_OPEN = "db-open-%s";
+
     private static final long DB_OPEN_TIMEOUT = 600L; // unit s
     private static final long DB_CLOSE_TIMEOUT = 30L; // unit s
-    /*
+    /**
      * This is threads number used to concurrently opening RocksDB dbs,
      * 8 is supposed enough due to configurable data disks and
      * disk number of one machine
@@ -107,7 +108,6 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
                         final String database, final String store) {
         this.tables = new HashMap<>();
         this.olapTables = new HashMap<>();
-
         this.provider = provider;
         this.database = database;
         this.store = store;
@@ -221,8 +221,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         }
 
         List<Future<?>> futures = new ArrayList<>();
-        ExecutorService openPool = ExecutorUtil.newFixedThreadPool(
-                                   OPEN_POOL_THREADS, DB_OPEN);
+        ExecutorService openPool = ExecutorUtil.newFixedThreadPool(OPEN_POOL_THREADS, DB_OPEN);
         // Open base disk
         futures.add(openPool.submit(() -> {
             this.sessions = this.open(config, this.tableNames());
@@ -282,8 +281,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
             Consumers.executeOncePerThread(openPool, OPEN_POOL_THREADS,
                                            this::closeSessions, DB_CLOSE_TIMEOUT);
         } catch (InterruptedException e) {
-            throw new BackendException("Failed to close session opened by " +
-                                       "open-pool");
+            throw new BackendException("Failed to close session opened by open-pool");
         }
 
         boolean terminated;
@@ -292,8 +290,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
             terminated = openPool.awaitTermination(DB_OPEN_TIMEOUT,
                                                    TimeUnit.SECONDS);
         } catch (Throwable e) {
-            throw new BackendException(
-                      "Failed to wait db-open thread pool shutdown", e);
+            throw new BackendException("Failed to wait db-open thread pool shutdown", e);
         }
         if (!terminated) {
             LOG.warn("Timeout when waiting db-open thread pool shutdown");
@@ -346,8 +343,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
                     none = null;
                 }
                 try {
-                    sessions = this.openSessionPool(config, dataPath,
-                                                    walPath, none);
+                    sessions = this.openSessionPool(config, dataPath, walPath, none);
                 } catch (RocksDBException e1) {
                     e = e1;
                 }
@@ -360,8 +356,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
             if (sessions == null) {
                 // Error after trying other ways
                 LOG.error("Failed to open RocksDB '{}'", dataPath, e);
-                throw new ConnectionException("Failed to open RocksDB '%s'",
-                                              e, dataPath);
+                throw new ConnectionException("Failed to open RocksDB '%s'", e, dataPath);
             }
         }
 
@@ -377,11 +372,9 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
     protected RocksDBSessions openSessionPool(HugeConfig config,
                                               String dataPath, String walPath,
-                                              List<String> tableNames)
-                                              throws RocksDBException {
+                                              List<String> tableNames) throws RocksDBException {
         if (tableNames == null) {
-            return new RocksDBStdSessions(config, this.database, this.store,
-                                          dataPath, walPath);
+            return new RocksDBStdSessions(config, this.database, this.store, dataPath, walPath);
         } else {
             return new RocksDBStdSessions(config, this.database, this.store,
                                           dataPath, walPath, tableNames);
@@ -404,8 +397,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         for (Entry<HugeType, String> e : this.tableDiskMapping.entrySet()) {
             HugeType type = e.getKey();
             RocksDBSessions db = this.db(e.getValue());
-            String key = type != HugeType.OLAP ? this.table(type).table() :
-                                                 type.string();
+            String key = type != HugeType.OLAP ? this.table(type).table() : type.string();
             tableDBMap.put(key, db);
         }
         return tableDBMap;
@@ -418,7 +410,6 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
     @Override
     public void close() {
         LOG.debug("Store close: {}", this.store);
-
         this.checkOpened();
         this.closeSessions();
     }
@@ -435,15 +426,13 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         readLock.lock();
         try {
             this.checkOpened();
-
             if (LOG.isDebugEnabled()) {
                 LOG.debug("Store {} mutation: {}", this.store, mutation);
             }
 
             for (HugeType type : mutation.types()) {
                 RocksDBSessions.Session session = this.session(type);
-                for (Iterator<BackendAction> it = mutation.mutation(type);
-                     it.hasNext();) {
+                for (Iterator<BackendAction> it = mutation.mutation(type); it.hasNext(); ) {
                     this.mutate(session, it.next());
                 }
             }
@@ -454,8 +443,8 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
     private void mutate(RocksDBSessions.Session session, BackendAction item) {
         BackendEntry entry = item.entry();
-
         RocksDBTable table;
+
         if (!entry.olap()) {
             // Oltp table
             table = this.table(entry.type());
@@ -469,6 +458,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
             }
             session = this.session(HugeType.OLAP);
         }
+
         switch (item.action()) {
             case INSERT:
                 table.insert(session, entry);
@@ -489,8 +479,8 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
                 table.updateIfAbsent(session, entry);
                 break;
             default:
-                throw new AssertionError(String.format(
-                          "Unsupported mutate action: %s", item.action()));
+                throw new AssertionError(String.format("Unsupported mutate action: %s",
+                                                       item.action()));
         }
     }
 
@@ -498,9 +488,9 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
     public Iterator<BackendEntry> query(Query query) {
         Lock readLock = this.storeLock.readLock();
         readLock.lock();
+
         try {
             this.checkOpened();
-
             HugeType tableType = RocksDBTable.tableType(query);
             RocksDBTable table;
             RocksDBSessions.Session session;
@@ -522,8 +512,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
                     table = this.table(this.olapTableName(pk));
                     iterators.add(table.query(this.session(HugeType.OLAP), q));
                 }
-                entries = new MergeIterator<>(entries, iterators,
-                                              BackendEntry::mergeable);
+                entries = new MergeIterator<>(entries, iterators, BackendEntry::mergeable);
             }
             return entries;
         } finally {
@@ -537,7 +526,6 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         readLock.lock();
         try {
             this.checkOpened();
-
             HugeType tableType = RocksDBTable.tableType(query);
             RocksDBTable table = this.table(tableType);
             return table.queryNumber(this.session(tableType), query);
@@ -552,10 +540,8 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         writeLock.lock();
         try {
             this.checkDbOpened();
-
             // Create tables with main disk
-            this.createTable(this.sessions,
-                             this.tableNames().toArray(new String[0]));
+            this.createTable(this.sessions, this.tableNames().toArray(new String[0]));
 
             // Create table with optimized disk
             Map<String, RocksDBSessions> tableDBMap = this.tableDBMapping();
@@ -590,10 +576,8 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         writeLock.lock();
         try {
             this.checkDbOpened();
-
             // Drop tables with main disk
-            this.dropTable(this.sessions,
-                           this.tableNames().toArray(new String[0]));
+            this.dropTable(this.sessions, this.tableNames().toArray(new String[0]));
 
             // Drop tables with optimized disk
             Map<String, RocksDBSessions> tableDBMap = this.tableDBMapping();
@@ -630,10 +614,10 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
     @Override
     public boolean initialized() {
         this.checkDbOpened();
-
         if (!this.opened()) {
             return false;
         }
+
         for (String table : this.tableNames()) {
             if (!this.sessions.existsTable(table)) {
                 return false;
@@ -726,7 +710,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         readLock.lock();
         try {
             Map<String, String> uniqueSnapshotDirMaps = new HashMap<>();
-            // Every rocksdb instance should create an snapshot
+            // Every rocksdb instance should create a snapshot
             for (Map.Entry<String, RocksDBSessions> entry : this.dbs.entrySet()) {
                 // Like: parent_path/rocksdb-data/*, * maybe g,m,s
                 Path originDataPath = Paths.get(entry.getKey()).toAbsolutePath();
@@ -743,8 +727,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
                 String snapshotDir = snapshotPath.toAbsolutePath().getParent().toString();
                 // Find the corresponding data HugeType key
-                String diskTableKey = this.findDiskTableKeyByPath(
-                                      entry.getKey());
+                String diskTableKey = this.findDiskTableKeyByPath(entry.getKey());
                 uniqueSnapshotDirMaps.put(snapshotDir, diskTableKey);
             }
             LOG.info("The store '{}' create snapshot successfully", this);
@@ -775,7 +758,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
             }
 
             for (Map.Entry<String, RocksDBSessions> entry :
-                 snapshotPaths.entrySet()) {
+                snapshotPaths.entrySet()) {
                 String snapshotPath = entry.getKey();
                 RocksDBSessions sessions = entry.getValue();
                 sessions.resumeSnapshot(snapshotPath);
@@ -819,8 +802,7 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
     }
 
     private void closeSessions() {
-        Iterator<Map.Entry<String, RocksDBSessions>> iter = this.dbs.entrySet()
-                                                                    .iterator();
+        Iterator<Map.Entry<String, RocksDBSessions>> iter = this.dbs.entrySet().iterator();
         while (iter.hasNext()) {
             Map.Entry<String, RocksDBSessions> entry = iter.next();
             RocksDBSessions sessions = entry.getValue();
@@ -835,23 +817,20 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
         return this.dbs.values();
     }
 
-    private void parseTableDiskMapping(Map<String, String> disks,
-                                       String dataPath) {
+    private void parseTableDiskMapping(Map<String, String> disks, String dataPath) {
 
         this.tableDiskMapping.clear();
         for (Map.Entry<String, String> disk : disks.entrySet()) {
             // The format of `disk` like: `graph/vertex: /path/to/disk1`
             String name = disk.getKey();
             String path = disk.getValue();
-            E.checkArgument(!dataPath.equals(path), "Invalid disk path" +
-                            "(can't be the same as data_path): '%s'", path);
+            E.checkArgument(!dataPath.equals(path),
+                            "Invalid disk path (can't be the same as data_path): '%s'", path);
             E.checkArgument(!name.isEmpty() && !path.isEmpty(),
-                            "Invalid disk format: '%s', expect `NAME:PATH`",
-                            disk);
+                            "Invalid disk format: '%s', expect `NAME:PATH`", disk);
             String[] pair = name.split("/", 2);
             E.checkArgument(pair.length == 2,
-                            "Invalid disk key format: '%s', " +
-                            "expect `STORE/TABLE`", name);
+                            "Invalid disk key format: '%s', expect `STORE/TABLE`", name);
             String store = pair[0].trim();
             HugeType table = HugeType.valueOf(pair[1].trim().toUpperCase());
             if (this.store.equals(store)) {
@@ -948,14 +927,10 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
             this.counters = new RocksDBTables.Counters(database);
 
-            registerTableManager(HugeType.VERTEX_LABEL,
-                                 new RocksDBTables.VertexLabel(database));
-            registerTableManager(HugeType.EDGE_LABEL,
-                                 new RocksDBTables.EdgeLabel(database));
-            registerTableManager(HugeType.PROPERTY_KEY,
-                                 new RocksDBTables.PropertyKey(database));
-            registerTableManager(HugeType.INDEX_LABEL,
-                                 new RocksDBTables.IndexLabel(database));
+            registerTableManager(HugeType.VERTEX_LABEL, new RocksDBTables.VertexLabel(database));
+            registerTableManager(HugeType.EDGE_LABEL, new RocksDBTables.EdgeLabel(database));
+            registerTableManager(HugeType.PROPERTY_KEY, new RocksDBTables.PropertyKey(database));
+            registerTableManager(HugeType.INDEX_LABEL, new RocksDBTables.IndexLabel(database));
             registerTableManager(HugeType.SECONDARY_INDEX,
                                  new RocksDBTables.SecondaryIndex(database));
         }
@@ -1005,13 +980,10 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
                                  String database, String store) {
             super(provider, database, store);
 
-            registerTableManager(HugeType.VERTEX,
-                                 new RocksDBTables.Vertex(database));
+            registerTableManager(HugeType.VERTEX, new RocksDBTables.Vertex(database));
 
-            registerTableManager(HugeType.EDGE_OUT,
-                                 RocksDBTables.Edge.out(database));
-            registerTableManager(HugeType.EDGE_IN,
-                                 RocksDBTables.Edge.in(database));
+            registerTableManager(HugeType.EDGE_OUT, RocksDBTables.Edge.out(database));
+            registerTableManager(HugeType.EDGE_IN, RocksDBTables.Edge.in(database));
 
             registerTableManager(HugeType.SECONDARY_INDEX,
                                  new RocksDBTables.SecondaryIndex(database));
@@ -1053,20 +1025,17 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
         @Override
         public Id nextId(HugeType type) {
-            throw new UnsupportedOperationException(
-                      "RocksDBGraphStore.nextId()");
+            throw new UnsupportedOperationException("RocksDBGraphStore.nextId()");
         }
 
         @Override
         public void increaseCounter(HugeType type, long num) {
-            throw new UnsupportedOperationException(
-                      "RocksDBGraphStore.increaseCounter()");
+            throw new UnsupportedOperationException("RocksDBGraphStore.increaseCounter()");
         }
 
         @Override
         public long getCounter(HugeType type) {
-            throw new UnsupportedOperationException(
-                      "RocksDBGraphStore.getCounter()");
+            throw new UnsupportedOperationException("RocksDBGraphStore.getCounter()");
         }
 
         /**
@@ -1117,10 +1086,8 @@ public abstract class RocksDBStore extends AbstractBackendStore<RocksDBSessions.
 
         private final RocksDBTables.Meta meta;
 
-        public RocksDBSystemStore(BackendStoreProvider provider,
-                                  String database, String store) {
+        public RocksDBSystemStore(BackendStoreProvider provider, String database, String store) {
             super(provider, database, store);
-
             this.meta = new RocksDBTables.Meta(database);
         }
 
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTable.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTable.java
index 7a5af5f1a..ec2959d32 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTable.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTable.java
@@ -25,9 +25,6 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils;
-import org.slf4j.Logger;
-
 import org.apache.hugegraph.backend.id.Id;
 import org.apache.hugegraph.backend.page.PageState;
 import org.apache.hugegraph.backend.query.Aggregate;
@@ -52,6 +49,8 @@ import org.apache.hugegraph.util.Bytes;
 import org.apache.hugegraph.util.E;
 import org.apache.hugegraph.util.Log;
 import org.apache.hugegraph.util.StringEncoding;
+import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils;
+import org.slf4j.Logger;
 
 public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendEntry> {
 
@@ -67,8 +66,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
     @Override
     protected void registerMetaHandlers() {
         this.registerMetaHandler("splits", (session, meta, args) -> {
-            E.checkArgument(args.length == 1,
-                            "The args count of %s must be 1", meta);
+            E.checkArgument(args.length == 1, "The args count of %s must be 1", meta);
             long splitSize = (long) args[0];
             return this.shardSplitter.getSplits(session, splitSize);
         });
@@ -203,7 +201,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
 
         // NOTE: this will lead to lazily creating the rocksdb iterator
         return BackendColumnIterator.wrap(new FlatMapperIterator<>(
-               ids.iterator(), id -> this.queryById(session, id)
+            ids.iterator(), id -> this.queryById(session, id)
         ));
     }
 
@@ -233,8 +231,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
         int type = query.inclusiveStart() ?
                    RocksDBSessions.Session.SCAN_GTE_BEGIN : RocksDBSessions.Session.SCAN_GT_BEGIN;
         type |= RocksDBSessions.Session.SCAN_PREFIX_END;
-        return session.scan(this.table(), query.start().asBytes(),
-                            query.prefix().asBytes(), type);
+        return session.scan(this.table(), query.start().asBytes(), query.prefix().asBytes(), type);
     }
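
[The scan type used here is a bit mask; the constants below are illustrative stand-ins, not the real SCAN_* values, and only show how the mask is composed:

    public class ScanTypeSketch {
        // Illustrative stand-ins for RocksDBSessions.Session.SCAN_* flags
        static final int SCAN_GT_BEGIN = 0x04;
        static final int SCAN_GTE_BEGIN = 0x0c;
        static final int SCAN_PREFIX_END = 0x10;

        public static void main(String[] args) {
            boolean inclusiveStart = true;
            int type = inclusiveStart ? SCAN_GTE_BEGIN : SCAN_GT_BEGIN;
            type |= SCAN_PREFIX_END; // stop once keys leave the prefix
            System.out.printf("scan type = 0x%02x%n", type);
        }
    }
]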
 
     protected BackendColumnIterator queryByRange(RocksDBSessions.Session session,
@@ -268,8 +265,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
         byte[] end = this.shardSplitter.position(shard.end());
         if (page != null && !page.isEmpty()) {
             byte[] position = PageState.fromString(page).position();
-            E.checkArgument(start == null ||
-                            Bytes.compare(position, start) >= 0,
+            E.checkArgument(start == null || Bytes.compare(position, start) >= 0,
                             "Invalid page out of lower bound");
             start = position;
         }
@@ -310,7 +306,6 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
 
         private static final String MEM_SIZE = "rocksdb.size-all-mem-tables";
         private static final String SST_SIZE = "rocksdb.total-sst-files-size";
-
         private static final String NUM_KEYS = "rocksdb.estimate-num-keys";
 
         public RocksDBShardSplitter(String table) {
@@ -338,8 +333,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
                 count = 1;
             }
 
-            Range range = new Range(keyRange.getLeft(),
-                                    Range.increase(keyRange.getRight()));
+            Range range = new Range(keyRange.getLeft(), Range.increase(keyRange.getRight()));
             List<Shard> splits = new ArrayList<>((int) count);
             splits.addAll(range.splitEven((int) count));
             return splits;
@@ -359,6 +353,7 @@ public class RocksDBTable extends BackendTable<RocksDBSessions.Session, BackendE
 
         @Override
         public byte[] position(String position) {
+            // TODO: START and END are the same (both empty) now, remove one of them?
             if (START.equals(position) || END.equals(position)) {
                 return null;
             }
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTables.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTables.java
index 06c2d91a1..dad0545ad 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTables.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdb/RocksDBTables.java
@@ -120,8 +120,7 @@ public class RocksDBTables {
              * `scanPrefix + delete`: session.delete(scanPrefix(prefix))
              */
             byte[] prefix = entry.id().asBytes();
-            try (BackendColumnIterator results = session.scan(this.table(),
-                                                              prefix)) {
+            try (BackendColumnIterator results = session.scan(this.table(), prefix)) {
                 while (results.hasNext()) {
                     byte[] column = results.next().name;
                     session.delete(this.table(), column);
@@ -218,6 +217,7 @@ public class RocksDBTables {
         }
 
         @Override
+        // TODO: why is this method the same as super.eliminate() in RocksDBTable? delete it?
         public void eliminate(RocksDBSessions.Session session, BackendEntry entry) {
             assert entry.columns().size() == 1;
             super.delete(session, entry);
@@ -291,10 +291,8 @@ public class RocksDBTables {
         protected BackendColumnIterator queryByCond(RocksDBSessions.Session session,
                                                     ConditionQuery query) {
                                                     ConditionQuery query) {
             assert query.conditionsSize() > 0;
-
             List<Condition> conds = query.syspropConditions(HugeKeys.ID);
-            E.checkArgument(!conds.isEmpty(),
-                            "Please specify the index conditions");
+            E.checkArgument(!conds.isEmpty(), "Please specify the index conditions");
 
             Id prefix = null;
             Id min = null;
@@ -323,8 +321,7 @@ public class RocksDBTables {
                         max = (Id) r.value();
                         break;
                     default:
-                        E.checkArgument(false, "Unsupported relation '%s'",
-                                        r.relation());
+                        E.checkArgument(false, "Unsupported relation '%s'", r.relation());
                 }
             }
 
@@ -340,7 +337,8 @@ public class RocksDBTables {
                                     RocksDBSessions.Session.SCAN_PREFIX_END);
             } else {
                 byte[] end = max.asBytes();
-                int type = maxEq ? RocksDBSessions.Session.SCAN_LTE_END : RocksDBSessions.Session.SCAN_LT_END;
+                int type = maxEq ? RocksDBSessions.Session.SCAN_LTE_END
+                                 : RocksDBSessions.Session.SCAN_LT_END;
                 return session.scan(this.table(), begin, end, type);
             }
         }
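
[The LTE/LT choice above only toggles whether the end bound is inclusive; the same semantics can be pictured with an in-memory sorted map (keys and values here are made up):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class RangeEndSketch {
        public static void main(String[] args) {
            NavigableMap<String, String> index = new TreeMap<>();
            index.put("id:1", "a");
            index.put("id:2", "b");
            index.put("id:3", "c");
            boolean maxEq = false; // maxEq picks SCAN_LTE_END, otherwise SCAN_LT_END
            // End bound is inclusive iff maxEq, mirroring the scan-flag choice
            System.out.println(index.subMap("id:1", true, "id:3", maxEq));
        }
    }
]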
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java
index 3d2b7f867..d7ce2db87 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstSessions.java
@@ -31,11 +31,6 @@ import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.Pair;
-import org.rocksdb.EnvOptions;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.SstFileWriter;
-
 import org.apache.hugegraph.backend.BackendException;
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBIngester;
@@ -44,14 +39,17 @@ import org.apache.hugegraph.backend.store.rocksdb.RocksDBStdSessions;
 import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.exception.NotSupportException;
 import org.apache.hugegraph.util.E;
+import org.rocksdb.EnvOptions;
+import org.rocksdb.Options;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.SstFileWriter;
 
 public class RocksDBSstSessions extends RocksDBSessions {
 
     private final String dataPath;
     private final Map<String, SstFileWriter> tables;
 
-    public RocksDBSstSessions(HugeConfig config, String database, String store,
-                              String dataPath) {
+    public RocksDBSstSessions(HugeConfig config, String database, String store, String dataPath) {
         super(config, database, store);
 
         this.dataPath = dataPath;
@@ -63,8 +61,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
         }
     }
 
-    public RocksDBSstSessions(HugeConfig config, String dataPath,
-                              String database, String store,
+    public RocksDBSstSessions(HugeConfig config, String dataPath, String database, String store,
                               List<String> tableNames) throws RocksDBException {
         this(config, dataPath, database, store);
         for (String table : tableNames) {
@@ -96,8 +93,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
     }
 
     @Override
-    public synchronized void createTable(String... tables)
-                                         throws RocksDBException {
+    public synchronized void createTable(String... tables) throws RocksDBException {
         for (String table : tables) {
             this.createTable(table);
         }
@@ -105,8 +101,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
 
     private void createTable(String table) throws RocksDBException {
         String number = String.format("%04d", 1);
-        Path sstFile = Paths.get(this.dataPath, table,
-                                 number + RocksDBIngester.SST);
+        Path sstFile = Paths.get(this.dataPath, table, number + RocksDBIngester.SST);
         try {
             FileUtils.forceMkdir(sstFile.toAbsolutePath().getParent().toFile());
         } catch (IOException e) {
@@ -116,8 +111,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
 
         EnvOptions env = new EnvOptions();
         Options options = new Options();
-        RocksDBStdSessions.initOptions(this.config(), options, options,
-                                       options, options);
+        RocksDBStdSessions.initOptions(this.config(), options, options, options, options);
         // NOTE: unset merge op due to SIGSEGV when cf.setMergeOperatorName()
         options.setMergeOperatorName("not-exist-merge-op");
         SstFileWriter sst = new SstFileWriter(env, options);
@@ -126,17 +120,17 @@ public class RocksDBSstSessions extends RocksDBSessions {
     }
 
     @Override
-    public synchronized void dropTable(String... tables)
-                                       throws RocksDBException {
+    public synchronized void dropTable(String... tables) throws RocksDBException {
         for (String table : tables) {
             this.dropTable(table);
         }
     }
 
-    public void dropTable(String table) throws RocksDBException {
-        SstFileWriter sst = this.tables.remove(table);
-        assert sst == null || !sst.isOwningHandle() :
-               "Please close table before drop to ensure call sst.finish()";
+    public void dropTable(String table) throws RocksDBException {
+        try (SstFileWriter sst = this.tables.remove(table)) {
+            assert sst == null || !sst.isOwningHandle() : "Please close table before drop to " +
+                                                          "ensure call sst.finish()";
+        }
+    }
 
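[The rewritten dropTable() leans on try-with-resources tolerating a null resource (close() is simply skipped when the resource is null), which a quick standalone check confirms:

    public class NullResourceSketch {
        public static void main(String[] args) {
            try (AutoCloseable res = null) {
                // Body still runs; close() is skipped for null resources
                System.out.println("no NullPointerException here");
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        }
    }
]
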
     @Override
@@ -155,8 +149,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
     }
 
     @Override
-    public RocksDBSessions copy(HugeConfig config,
-                                String database, String store) {
+    public RocksDBSessions copy(HugeConfig config, String database, String store) {
         return new RocksDBSstSessions(config, database, store, this);
     }
 
@@ -176,8 +169,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
     }
 
     @Override
-    public String hardLinkSnapshot(String snapshotPath)
-                                   throws RocksDBException {
+    public String hardLinkSnapshot(String snapshotPath) {
         throw new UnsupportedOperationException("hardLinkSnapshot");
     }
 
@@ -264,7 +256,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
         @Override
         public Integer commit() {
             int count = this.batch.size();
-            if (count <= 0) {
+            if (count == 0) {
                 return 0;
             }
 
@@ -344,7 +336,7 @@ public class RocksDBSstSessions extends RocksDBSessions {
         /**
          * Merge a record to an existing key to a table
          * For more details about merge-operator:
-         *  https://github.com/facebook/rocksdb/wiki/merge-operator
+         *  <a href="https://github.com/facebook/rocksdb/wiki/merge-operator">...</a>
          */
         @Override
         public void merge(String table, byte[] key, byte[] value) {
@@ -431,10 +423,8 @@ public class RocksDBSstSessions extends RocksDBSessions {
          * Scan records by key range from a table
          */
         @Override
-        public BackendColumnIterator scan(String table,
-                                          byte[] keyFrom,
-                                          byte[] keyTo,
-                                          int scanType) {
+        public BackendColumnIterator scan(String table, byte[] keyFrom,
+                                          byte[] keyTo, int scanType) {
             assert !this.hasChanges();
             return BackendColumnIterator.empty();
         }
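
[For the merge-operator reference above, a minimal end-to-end sketch with the stock uint64add operator could look like this (the path is made up, and it assumes the rocksdbjni library on the classpath; uint64add interprets values as little-endian 64-bit integers):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;

    public class MergeOpSketch {
        public static void main(String[] args) throws Exception {
            RocksDB.loadLibrary();
            try (Options options = new Options().setCreateIfMissing(true)
                                                .setMergeOperatorName("uint64add");
                 RocksDB db = RocksDB.open(options, "/tmp/merge-demo")) {
                byte[] key = "counter".getBytes();
                byte[] one = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN)
                                       .putLong(1L).array();
                db.merge(key, one); // read-modify-write without an explicit get()
                db.merge(key, one);
                long value = ByteBuffer.wrap(db.get(key))
                                       .order(ByteOrder.LITTLE_ENDIAN).getLong();
                System.out.println(value); // 2
            }
        }
    }
]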
diff --git a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstStore.java b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstStore.java
index c88cd4970..12ccfdc15 100644
--- a/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstStore.java
+++ b/hugegraph-server/hugegraph-rocksdb/src/main/java/org/apache/hugegraph/backend/store/rocksdbsst/RocksDBSstStore.java
@@ -19,8 +19,6 @@ package org.apache.hugegraph.backend.store.rocksdbsst;
 
 import java.util.List;
 
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.id.Id;
 import org.apache.hugegraph.backend.store.BackendStoreProvider;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions;
@@ -28,6 +26,7 @@ import org.apache.hugegraph.backend.store.rocksdb.RocksDBStore;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBTables;
 import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.type.HugeType;
+import org.rocksdb.RocksDBException;
 
 public abstract class RocksDBSstStore extends RocksDBStore {
 
@@ -42,8 +41,7 @@ public abstract class RocksDBSstStore extends RocksDBStore {
                                               List<String> tableNames)
                                               throws RocksDBException {
         if (tableNames == null) {
-            return new RocksDBSstSessions(config, this.database(),
-                                          this.store(), dataPath);
+            return new RocksDBSstSessions(config, this.database(), this.store(), dataPath);
         } else {
             return new RocksDBSstSessions(config, this.database(), this.store(),
                                           dataPath, tableNames);
@@ -58,13 +56,10 @@ public abstract class RocksDBSstStore extends RocksDBStore {
                                     String database, String store) {
             super(provider, database, store);
 
-            registerTableManager(HugeType.VERTEX,
-                                 new RocksDBTables.Vertex(database));
+            registerTableManager(HugeType.VERTEX, new RocksDBTables.Vertex(database));
 
-            registerTableManager(HugeType.EDGE_OUT,
-                                 RocksDBTables.Edge.out(database));
-            registerTableManager(HugeType.EDGE_IN,
-                                 RocksDBTables.Edge.in(database));
+            registerTableManager(HugeType.EDGE_OUT, RocksDBTables.Edge.out(database));
+            registerTableManager(HugeType.EDGE_IN, RocksDBTables.Edge.in(database));
 
             registerTableManager(HugeType.SECONDARY_INDEX,
                                  new RocksDBTables.SecondaryIndex(database));
@@ -80,12 +75,9 @@ public abstract class RocksDBSstStore extends RocksDBStore {
                                  new RocksDBTables.RangeLongIndex(database));
             registerTableManager(HugeType.RANGE_DOUBLE_INDEX,
                                  new RocksDBTables.RangeDoubleIndex(database));
-            registerTableManager(HugeType.SEARCH_INDEX,
-                                 new RocksDBTables.SearchIndex(database));
-            registerTableManager(HugeType.SHARD_INDEX,
-                                 new RocksDBTables.ShardIndex(database));
-            registerTableManager(HugeType.UNIQUE_INDEX,
-                                 new RocksDBTables.UniqueIndex(database));
+            registerTableManager(HugeType.SEARCH_INDEX, new RocksDBTables.SearchIndex(database));
+            registerTableManager(HugeType.SHARD_INDEX, new RocksDBTables.ShardIndex(database));
+            registerTableManager(HugeType.UNIQUE_INDEX, new RocksDBTables.UniqueIndex(database));
         }
 
         @Override
@@ -95,20 +87,17 @@ public abstract class RocksDBSstStore extends RocksDBStore {
 
         @Override
         public Id nextId(HugeType type) {
-            throw new UnsupportedOperationException(
-                      "RocksDBSstGraphStore.nextId()");
+            throw new UnsupportedOperationException("RocksDBSstGraphStore.nextId()");
         }
 
         @Override
         public void increaseCounter(HugeType type, long increment) {
-            throw new UnsupportedOperationException(
-                      "RocksDBSstGraphStore.increaseCounter()");
+            throw new UnsupportedOperationException("RocksDBSstGraphStore.increaseCounter()");
         }
 
         @Override
         public long getCounter(HugeType type) {
-            throw new UnsupportedOperationException(
-                      "RocksDBSstGraphStore.getCounter()");
+            throw new UnsupportedOperationException("RocksDBSstGraphStore.getCounter()");
         }
     }
 }
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/BaseRocksDBUnitTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/BaseRocksDBUnitTest.java
index 9a9104412..5629938f9 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/BaseRocksDBUnitTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/BaseRocksDBUnitTest.java
@@ -23,23 +23,21 @@ import java.nio.ByteOrder;
 import java.util.ArrayList;
 
 import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBStdSessions;
 import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.unit.BaseUnitTest;
 import org.apache.hugegraph.unit.FakeObjects;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.rocksdb.RocksDBException;
 
 public class BaseRocksDBUnitTest extends BaseUnitTest {
 
     private static final String TMP_DIR = System.getProperty("java.io.tmpdir");
     protected static final String DB_PATH = TMP_DIR + "/" + "rocksdb";
     protected static final String SNAPSHOT_PATH = TMP_DIR + "/" + "snapshot";
-
     protected static final String TABLE = "test-table";
 
     protected RocksDBSessions rocks;
@@ -74,10 +72,9 @@ public class BaseRocksDBUnitTest extends BaseUnitTest {
         return getString(this.rocks.session().get(TABLE, getBytes(key)));
     }
 
-    protected void clearData() throws RocksDBException {
+    protected void clearData() {
         for (String table : new ArrayList<>(this.rocks.openedTables())) {
-            this.rocks.session().deleteRange(table,
-                                             new byte[]{0}, new byte[]{-1});
+            this.rocks.session().deleteRange(table, new byte[]{0}, new byte[]{-1});
         }
         this.commit();
     }
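
[The clearData() helper above leans on deleteRange; against the raw RocksDB API the call is equivalent to this sketch (start bound inclusive, end bound exclusive, unsigned byte-wise ordering; the path is made up):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;

    public class DeleteRangeSketch {
        public static void main(String[] args) throws Exception {
            RocksDB.loadLibrary();
            try (Options options = new Options().setCreateIfMissing(true);
                 RocksDB db = RocksDB.open(options, "/tmp/delete-range-demo")) {
                db.put(new byte[]{0, 1}, "a".getBytes());
                db.put(new byte[]{5}, "b".getBytes());
                // Removes all keys in [{0x00}, {0xff}); {-1} is byte 0xff
                db.deleteRange(new byte[]{0}, new byte[]{-1});
                System.out.println(db.get(new byte[]{5})); // null
            }
        }
    }
]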
@@ -119,7 +116,7 @@ public class BaseRocksDBUnitTest extends BaseUnitTest {
 
     private static void close(RocksDBSessions rocks) throws RocksDBException {
         for (String table : new ArrayList<>(rocks.openedTables())) {
-            if (table.equals("default")) {
+            if ("default".equals(table)) {
                 continue;
             }
             rocks.dropTable(table);
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBCountersTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBCountersTest.java
index 083cb1d3b..ee712f936 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBCountersTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBCountersTest.java
@@ -21,16 +21,15 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.junit.Before;
-import org.junit.Test;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.id.Id;
 import org.apache.hugegraph.backend.id.IdGenerator;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions.Session;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBTables;
 import org.apache.hugegraph.testutil.Assert;
 import org.apache.hugegraph.type.HugeType;
+import org.junit.Before;
+import org.junit.Test;
+import org.rocksdb.RocksDBException;
 
 public class RocksDBCountersTest extends BaseRocksDBUnitTest {
 
@@ -104,7 +103,7 @@ public class RocksDBCountersTest extends BaseRocksDBUnitTest {
     private Id nextId(Session session, HugeType type) {
         final int MAX_TIMES = 1000;
         // Do get-increase-get-compare operation
-        long counter = 0L;
+        long counter;
         long expect = -1L;
         synchronized (this) {
             for (int i = 0; i < MAX_TIMES; i++) {
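
[The get-increase-get-compare loop being tested can be pictured with this in-memory sketch, where an AtomicLong is a stand-in for the counters table (the structure mirrors the retry loop, not hugegraph's actual implementation):

    import java.util.concurrent.atomic.AtomicLong;

    public class CounterSketch {
        private static final AtomicLong COUNTER = new AtomicLong(); // stand-in for the counters table

        public static long nextId() {
            final int MAX_TIMES = 1000;
            for (int i = 0; i < MAX_TIMES; i++) {
                long expect = COUNTER.get() + 1L;  // get
                COUNTER.incrementAndGet();         // increase
                long got = COUNTER.get();          // get again
                if (got == expect) {               // compare: no concurrent writer won
                    return got;
                }
            }
            throw new IllegalStateException("Unable to allocate id");
        }

        public static void main(String[] args) {
            System.out.println(nextId()); // 1
        }
    }
]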
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBPerfTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBPerfTest.java
index a6d94d1b8..fdab91e91 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBPerfTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBPerfTest.java
@@ -24,19 +24,17 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import org.junit.Test;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn;
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions.Session;
+import org.junit.Test;
 
 public class RocksDBPerfTest extends BaseRocksDBUnitTest {
 
     private static final int TIMES = 10000 * 1000;
 
     @Test
-    public void testSeekExistKey() throws RocksDBException {
+    public void testSeekExistKey() {
         put("exist", "value");
 
         Session session = this.rocks.session();
@@ -49,7 +47,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testSeekNonExistKey() throws RocksDBException {
+    public void testSeekNonExistKey() {
         put("exist", "value");
 
         Session session = this.rocks.session();
@@ -62,7 +60,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testGetExistKey() throws RocksDBException {
+    public void testGetExistKey() {
         put("exist", "value");
 
         Session session = this.rocks.session();
@@ -73,7 +71,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testGetNonExistKey() throws RocksDBException {
+    public void testGetNonExistKey() {
         put("exist", "value");
 
         Session session = this.rocks.session();
@@ -84,14 +82,14 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testPut() throws RocksDBException {
+    public void testPut() {
         for (int i = 0; i < TIMES; i++) {
             put("person-" + i, "value-" + i);
         }
     }
 
     @Test
-    public void testGet3Keys() throws RocksDBException {
+    public void testGet3Keys() {
 
         put("person:1gname", "James");
         put("person:1gage", "19");
@@ -110,7 +108,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testMultiGet3Keys() throws RocksDBException {
+    public void testMultiGet3Keys() {
 
         put("person:1gname", "James");
         put("person:1gage", "19");
@@ -134,7 +132,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testGet1KeyWithMultiValues() throws RocksDBException {
+    public void testGet1KeyWithMultiValues() {
 
         put("person:1gname", "James");
         put("person:1gage", "19");
@@ -153,7 +151,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testScanByPrefix() throws RocksDBException {
+    public void testScanByPrefix() {
 
         put("person:1gname", "James");
         put("person:1gage", "19");
@@ -173,31 +171,31 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testGet3KeysWithData() throws RocksDBException {
+    public void testGet3KeysWithData() {
         testPut();
         testGet3Keys();
     }
 
     @Test
-    public void testMultiGet3KeysWithData() throws RocksDBException {
+    public void testMultiGet3KeysWithData() {
         testPut();
         testMultiGet3Keys();
     }
 
     @Test
-    public void testGet1KeyWithData() throws RocksDBException {
+    public void testGet1KeyWithData() {
         testPut();
         testGet1KeyWithMultiValues();
     }
 
     @Test
-    public void testScanByPrefixWithData() throws RocksDBException {
+    public void testScanByPrefixWithData() {
         testPut();
         testScanByPrefix();
     }
 
     @Test
-    public void testUpdate() throws RocksDBException {
+    public void testUpdate() {
         Session session = this.rocks.session();
 
         Random r = new Random();
@@ -231,7 +229,7 @@ public class RocksDBPerfTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testScanByPrefixAfterUpdate() throws RocksDBException {
+    public void testScanByPrefixAfterUpdate() {
         Session session = this.rocks.session();
 
         this.testUpdate();
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionTest.java
index 839a0b3e0..94ffe2294 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionTest.java
@@ -19,27 +19,27 @@ package org.apache.hugegraph.unit.rocksdb;
 
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.hugegraph.unit.BaseUnitTest;
-import org.junit.Assume;
-import org.junit.Test;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn;
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions.Session;
 import org.apache.hugegraph.testutil.Assert;
+import org.apache.hugegraph.unit.BaseUnitTest;
+import org.junit.Assume;
+import org.junit.Test;
+import org.rocksdb.RocksDBException;
 
 public class RocksDBSessionTest extends BaseRocksDBUnitTest {
 
     @Test
-    public void testPutAndGet() throws RocksDBException {
+    public void testPutAndGet() {
         String value = getString(this.rocks.session().get(TABLE, getBytes("person:1gname")));
-        Assert.assertEquals(null, value);
+        Assert.assertNull(value);
 
         this.rocks.session().put(TABLE, getBytes("person:1gname"), getBytes("James"));
         this.rocks.session().put(TABLE, getBytes("person:1gage"), getBytes(19));
@@ -57,9 +57,9 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testPutAndMultiGet() throws RocksDBException {
-        BackendColumnIterator values = this.rocks.session().get(TABLE,
-                                       Arrays.asList(getBytes("person:1gname")));
+    public void testPutAndMultiGet() {
+        BackendColumnIterator values =
+            this.rocks.session().get(TABLE, Collections.singletonList(getBytes("person:1gname")));
         Assert.assertFalse(values.hasNext());
 
         this.rocks.session().put(TABLE, getBytes("person:1gname"), getBytes("James"));
@@ -67,9 +67,8 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.rocks.session().put(TABLE, getBytes("person:1gcity"), getBytes("Beijing"));
         this.commit();
 
-        values = this.rocks.session().get(TABLE, Arrays.asList(
-                                                 getBytes("person:1gname"),
-                                                 getBytes("person:1gage")));
+        values = this.rocks.session().get(TABLE, Arrays.asList(getBytes("person:1gname"),
+                                                               getBytes("person:1gage")));
         Assert.assertTrue(values.hasNext());
         Assert.assertEquals("James", getString(values.next().value));
         Assert.assertEquals(19, getLong(values.next().value));
@@ -123,7 +122,7 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testMergeWithCounter() throws RocksDBException {
+    public void testMergeWithCounter() {
         this.rocks.session().put(TABLE, getBytes("person:1gage"), getBytes(19));
         this.commit();
 
@@ -163,7 +162,7 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testScanByAll() throws RocksDBException {
+    public void testScanByAll() {
         put("person:1gname", "James");
         put("person:2gname", "Lisa");
 
@@ -397,7 +396,7 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.commit();
 
         Assert.assertEquals("James", get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
+        Assert.assertNull(get("person:1gage"));
         Assert.assertEquals("Beijing", get("person:1gcity"));
     }
 
@@ -436,9 +435,9 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.rocks.session().deletePrefix(TABLE, getBytes("person:1"));
         this.commit();
 
-        Assert.assertEquals(null, get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
-        Assert.assertEquals(null, get("person:1gcity"));
+        Assert.assertNull(get("person:1gname"));
+        Assert.assertNull(get("person:1gage"));
+        Assert.assertNull(get("person:1gcity"));
 
         Assert.assertEquals("Lisa", get("person:2gname"));
     }
@@ -464,13 +463,13 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.rocks.session().deleteRange(TABLE, getBytes("person:1"), getBytes("person:3"));
         this.commit();
 
-        Assert.assertEquals(null, get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
-        Assert.assertEquals(null, get("person:1gcity"));
+        Assert.assertNull(get("person:1gname"));
+        Assert.assertNull(get("person:1gage"));
+        Assert.assertNull(get("person:1gcity"));
 
-        Assert.assertEquals(null, get("person:2gname"));
-        Assert.assertEquals(null, get("person:2gage"));
-        Assert.assertEquals(null, get("person:2gcity"));
+        Assert.assertNull(get("person:2gname"));
+        Assert.assertNull(get("person:2gage"));
+        Assert.assertNull(get("person:2gcity"));
 
         Assert.assertEquals("Hebe", get("person:3gname"));
         Assert.assertEquals("21", get("person:3gage"));
@@ -543,7 +542,7 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
     }
 
     @Test
-    public void testDeleteByRangeWithMinMaxByteValue() throws RocksDBException {
+    public void testDeleteByRangeWithMinMaxByteValue() {
         Session session = this.rocks.session();
 
         byte[] key11 = new byte[]{1, 0};
@@ -601,17 +600,17 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.commit();
 
         Assert.assertEquals("James2", get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
+        Assert.assertNull(get("person:1gage"));
 
         // deleteSingle after put twice
         this.rocks.session().deleteSingle(TABLE, getBytes("person:1gname"));
         this.commit();
 
         // NOTE: maybe return "James" here
-        Assert.assertEquals(null, get("person:1gname"));
+        Assert.assertNull(get("person:1gname"));
         Assert.assertTrue(null == get("person:1gname") ||
                           "James".equals(get("person:1gname")));
-        Assert.assertEquals(null, get("person:1gage"));
+        Assert.assertNull(get("person:1gage"));
     }
 
     @Test
@@ -628,13 +627,13 @@ public class RocksDBSessionTest extends BaseRocksDBUnitTest {
         this.commit();
 
         Assert.assertEquals("James", get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
+        Assert.assertNull(get("person:1gage"));
         Assert.assertEquals("Beijing", get("person:1gcity"));
 
         this.rocks.session().compactRange(TABLE);
 
         Assert.assertEquals("James", get("person:1gname"));
-        Assert.assertEquals(null, get("person:1gage"));
+        Assert.assertNull(get("person:1gage"));
         Assert.assertEquals("Beijing", get("person:1gcity"));
     }
 
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionsTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionsTest.java
index 37e1472c4..aa74d9cd6 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionsTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/rocksdb/RocksDBSessionsTest.java
@@ -21,9 +21,6 @@ import java.io.File;
 import java.io.IOException;
 
 import org.apache.commons.io.FileUtils;
-import org.junit.Test;
-import org.rocksdb.RocksDBException;
-
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBMetrics;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBOptions;
 import org.apache.hugegraph.backend.store.rocksdb.RocksDBSessions;
@@ -32,6 +29,9 @@ import org.apache.hugegraph.backend.store.rocksdbsst.RocksDBSstSessions;
 import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.testutil.Assert;
 import org.apache.hugegraph.unit.FakeObjects;
+import org.junit.Test;
+import org.rocksdb.RocksDBException;
+
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 
@@ -162,9 +162,7 @@ public class RocksDBSessionsTest extends BaseRocksDBUnitTest {
         HugeConfig config = FakeObjects.newConfig();
         String sstPath = DB_PATH + "/sst";
         config.addProperty(RocksDBOptions.SST_PATH.name(), sstPath);
-        RocksDBSstSessions sstSessions = new RocksDBSstSessions(config,
-                                                                "sst", "store",
-                                                                sstPath);
+        RocksDBSstSessions sstSessions = new RocksDBSstSessions(config, "sst", "store", sstPath);
         final String TABLE1 = "test-table1";
         final String TABLE2 = "test-table2";
         sstSessions.createTable(TABLE1);
@@ -192,8 +190,7 @@ public class RocksDBSessionsTest extends BaseRocksDBUnitTest {
         Assert.assertFalse(sstSessions.existsTable(TABLE1));
         Assert.assertFalse(sstSessions.existsTable(TABLE2));
 
-        RocksDBSessions rocks = new RocksDBStdSessions(config, "db", "store",
-                                                       sstPath, sstPath);
+        RocksDBSessions rocks = new RocksDBStdSessions(config, "db", "store", sstPath, sstPath);
         // Will ingest sst file of TABLE1
         rocks.createTable(TABLE1);
         Assert.assertEquals(ImmutableList.of("1000"),
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryBackendEntryTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryBackendEntryTest.java
index 2ead8ba58..97b55e30b 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryBackendEntryTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryBackendEntryTest.java
@@ -17,13 +17,13 @@
 
 package org.apache.hugegraph.unit.serializer;
 
-import org.junit.Test;
-
 import org.apache.hugegraph.backend.serializer.BinaryBackendEntry;
 import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn;
 import org.apache.hugegraph.testutil.Assert;
 import org.apache.hugegraph.type.HugeType;
 import org.apache.hugegraph.unit.BaseUnitTest;
+import org.junit.Test;
+
 import com.google.common.collect.ImmutableList;
 
 public class BinaryBackendEntryTest extends BaseUnitTest {
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryScatterSerializerTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryScatterSerializerTest.java
index 28a6a219d..abc1a92fa 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryScatterSerializerTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinaryScatterSerializerTest.java
@@ -17,18 +17,17 @@
 
 package org.apache.hugegraph.unit.serializer;
 
-import org.apache.hugegraph.config.HugeConfig;
-import org.junit.Test;
-
 import org.apache.hugegraph.backend.serializer.BinaryBackendEntry;
 import org.apache.hugegraph.backend.serializer.BinaryScatterSerializer;
 import org.apache.hugegraph.backend.store.BackendEntry;
+import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.structure.HugeEdge;
 import org.apache.hugegraph.structure.HugeVertex;
 import org.apache.hugegraph.testutil.Assert;
 import org.apache.hugegraph.testutil.Whitebox;
 import org.apache.hugegraph.unit.BaseUnitTest;
 import org.apache.hugegraph.unit.FakeObjects;
+import org.junit.Test;
 
 public class BinaryScatterSerializerTest extends BaseUnitTest {
 
@@ -84,8 +83,7 @@ public class BinaryScatterSerializerTest extends BaseUnitTest {
 
     private static BackendEntry parse(BackendEntry originEntry) {
         byte[] bytes = originEntry.id().asBytes();
-        BackendEntry parsedEntry = new BinaryBackendEntry(originEntry.type(),
-                                                          bytes);
+        BackendEntry parsedEntry = new BinaryBackendEntry(originEntry.type(), bytes);
         parsedEntry.columns(originEntry.columns());
         return parsedEntry;
     }
diff --git a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinarySerializerTest.java b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinarySerializerTest.java
index 3eb269a26..59e77eb5d 100644
--- a/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinarySerializerTest.java
+++ b/hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/unit/serializer/BinarySerializerTest.java
@@ -17,17 +17,16 @@
 
 package org.apache.hugegraph.unit.serializer;
 
-import org.apache.hugegraph.config.HugeConfig;
-import org.junit.Test;
-
 import org.apache.hugegraph.backend.serializer.BinarySerializer;
 import org.apache.hugegraph.backend.store.BackendEntry;
+import org.apache.hugegraph.config.HugeConfig;
 import org.apache.hugegraph.structure.HugeEdge;
 import org.apache.hugegraph.structure.HugeVertex;
 import org.apache.hugegraph.testutil.Assert;
 import org.apache.hugegraph.testutil.Whitebox;
 import org.apache.hugegraph.unit.BaseUnitTest;
 import org.apache.hugegraph.unit.FakeObjects;
+import org.junit.Test;
 
 public class BinarySerializerTest extends BaseUnitTest {
 
