This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/master by this push:
     new ac02e0db03c Rename Schema_File to PB_Tree (#10186)
ac02e0db03c is described below

commit ac02e0db03caa2df9c1f6cedc92787645c9f5c18
Author: Chen YZ <[email protected]>
AuthorDate: Fri Jun 16 20:22:13 2023 +0800

    Rename Schema_File to PB_Tree (#10186)
---
 .../Data-Modeling/SchemaRegion-rocksdb.md          |   2 +-
 .../Maintenance-Tools/SchemaFileSketch-Tool.md     |  10 +-
 docs/UserGuide/Reference/Common-Config-Manual.md   |  12 +-
 docs/UserGuide/Reference/Status-Codes.md           | 260 ++++++++++-----------
 .../Data-Modeling/SchemaRegion-rocksdb.md          |   2 +-
 .../Maintenance-Tools/SchemaFileSketch-Tool.md     |  10 +-
 .../zh/UserGuide/Reference/Common-Config-Manual.md |   2 +-
 docs/zh/UserGuide/Reference/Status-Codes.md        | 260 ++++++++++-----------
 .../db/it/schema/IoTDBSortedShowTimeseriesIT.java  |   2 +-
 .../org/apache/iotdb/db/it/schema/IoTDBTagIT.java  |   2 +-
 .../org/apache/iotdb/util/AbstractSchemaIT.java    |  14 +-
 iotdb-client/client-cpp/src/main/Session.h         |   4 +-
 .../java/org/apache/iotdb/rpc/TSStatusCode.java    |   4 +-
 .../resources/conf/iotdb-common.properties         |   2 +-
 ...rint-schema-file.bat => print-pb-tree-file.bat} |   2 +-
 ...{print-schema-file.sh => print-pb-tree-file.sh} |   0
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  44 ++--
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |  21 +-
 .../SchemaFileLogCorruptedException.java           |   4 +-
 .../metadata/schemafile/SchemaFileNotExists.java   |   4 +-
 .../schemafile/SchemaPageOverflowException.java    |   3 +-
 .../apache/iotdb/db/metadata/MetadataConstant.java |   8 +-
 .../idtable/AppendOnlyDiskSchemaManager.java       |   2 +-
 .../metadata/metric/SchemaEngineCachedMetric.java  |   1 +
 .../db/metadata/mtree/store/CachedMTreeStore.java  |  12 +-
 .../mtree/store/disk/cache/CacheMemoryManager.java |   2 +-
 .../ReleaseFlushStrategyNumBasedImpl.java          |   2 +-
 .../mtree/store/disk/schemafile/ISchemaFile.java   |   4 +-
 .../mtree/store/disk/schemafile/SchemaFile.java    |  34 +--
 .../store/disk/schemafile/SchemaFileConfig.java    |  14 +-
 .../disk/schemafile/pagemgr/BTreePageManager.java  |   2 +-
 .../store/disk/schemafile/pagemgr/PageManager.java |   2 +-
 .../rescon/CachedSchemaEngineStatistics.java       |   2 +-
 .../rescon/CachedSchemaRegionStatistics.java       |   2 +-
 .../db/metadata/rescon/SchemaResourceManager.java  |   4 +-
 .../db/metadata/schemaregion/SchemaEngine.java     |   2 +-
 .../db/metadata/schemaregion/SchemaEngineMode.java |   2 +-
 ...maFileImpl.java => SchemaRegionPBTreeImpl.java} |  46 ++--
 .../protocol/influxdb/util/QueryResultUtils.java   |   2 +-
 ...leSketchTool.java => PBTreeFileSketchTool.java} |  10 +-
 .../mtree/schemafile/AliasIndexPageTest.java       |   2 +-
 .../mtree/schemafile/InternalPageTest.java         |   2 +-
 .../mtree/schemafile/SchemaFileLogTest.java        |  15 +-
 .../metadata/mtree/schemafile/SchemaFileTest.java  |   2 +-
 .../mtree/schemafile/WrappedSegmentTest.java       |   2 +-
 .../schemaRegion/AbstractSchemaRegionTest.java     |  12 +-
 .../schemaRegion/SchemaRegionBasicTest.java        |   4 +-
 .../schemaRegion/SchemaStatisticsTest.java         |   2 +-
 ...leSketchTest.java => PBTreeFileSketchTest.java} |  10 +-
 .../src/test/resources/iotdb-datanode.properties   |   4 +-
 50 files changed, 437 insertions(+), 435 deletions(-)

diff --git a/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md 
b/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
index d1f71ac0122..4caef952ed6 100644
--- a/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
+++ b/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
@@ -44,7 +44,7 @@ Rocksdb_based. Restart the IoTDB, the system will use 
`RSchemaRegion` to manage
 ####################
 ### Schema Engine Configuration
 ####################
-# Choose the mode of schema engine. The value could be Memory,Schema_File and 
Rocksdb_based. If the provided value doesn't match any pre-defined value, 
Memory mode will be used as default.
+# Choose the mode of schema engine. The value could be Memory, PB_Tree and 
Rocksdb_based. If the provided value doesn't match any pre-defined value, 
Memory mode will be used as default.
 # Datatype: string
 schema_engine_mode=Rocksdb_based
 
diff --git a/docs/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md 
b/docs/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
index ca5b85ec457..74262d39ce0 100644
--- a/docs/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
+++ b/docs/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
@@ -19,20 +19,20 @@
 
 -->
 
-## SchemaFileSketch Tool
+## PBTreeFileSketch Tool
 
-Since version 0.14.0, IoTDB could store schema into a persistent slotted file.
+Since version 1.1, IoTDB can store schema in a persistent slotted file.
 
-If you want to parse schema file into a human-readable way, you can use this 
tool to parse the specified schema file.
+If you want to parse a PB-Tree file into a human-readable format, you can use 
this tool to parse the specified PB-Tree file.
 
 The tool can sketch .pst file.
 
 ### How to use
 
 Linux/MacOS
-> ./print-schema-file.sh -f your/path/to/schema_file.pst -o 
/your/path/to/sketch.txt
+> ./print-pb-tree-file.sh -f your/path/to/pb_tree.pst -o 
/your/path/to/sketch.txt
 
 Windows
 
-> ./print-schema-file.bat -f your/path/to/schema_file.pst -o 
/your/path/to/sketch.txt
+> ./print-pb-tree-file.bat -f your/path/to/pb_tree.pst -o 
/your/path/to/sketch.txt
 
diff --git a/docs/UserGuide/Reference/Common-Config-Manual.md 
b/docs/UserGuide/Reference/Common-Config-Manual.md
index fb131446be2..b0bc78c76a7 100644
--- a/docs/UserGuide/Reference/Common-Config-Manual.md
+++ b/docs/UserGuide/Reference/Common-Config-Manual.md
@@ -373,12 +373,12 @@ Different configuration parameters take effect in the 
following three ways:
 
 * schema\_engine\_mode
 
-|名字| schema\_engine\_mode |
-|:---:|:---|
-|Description| Schema engine mode, supporting Memory and Schema_File modes; 
Schema_File mode support evict the timeseries schema temporarily not used in 
memory at runtime, and load it into memory from disk when needed. This 
parameter must be the same on all DataNodes in one cluster.|
-|Type| string |
-|Default| Memory |
-|Effective| Only allowed to be modified in first start up |
+|名字| schema\_engine\_mode                                                      
                                                                                
                                                                                
                               |
+|:---:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|Description| Schema engine mode, supporting Memory and PB_Tree modes; PB_Tree 
mode supports evicting the timeseries schemas temporarily not used in memory at 
runtime, and loading them into memory from disk when needed. This parameter must be 
the same on all DataNodes in one cluster. |
+|Type| string                                                                  
                                                                                
                                                                                
                                 |
+|Default| Memory                                                               
                                                                                
                                                                                
                                    |
+|Effective| Only allowed to be modified in first start up                      
                                                                                
                                                                                
                                      |
 
 * mlog\_buffer\_size
 
diff --git a/docs/UserGuide/Reference/Status-Codes.md 
b/docs/UserGuide/Reference/Status-Codes.md
index 2bef81d33ae..b36e0714e67 100644
--- a/docs/UserGuide/Reference/Status-Codes.md
+++ b/docs/UserGuide/Reference/Status-Codes.md
@@ -42,136 +42,136 @@ With Status Code, instead of writing codes like `if 
(e.getErrorMessage().contain
 
 Here is a list of Status Code and related message:
 
-| Status Code | Status Type                       | Meanings                   
                                                               |
-|:------------|:----------------------------------|:------------------------------------------------------------------------------------------|
-| 200         | SUCCESS_STATUS                    |                            
                                                               |
-| 201         | INCOMPATIBLE_VERSION              | Incompatible version       
                                                               |
-| 202         | CONFIGURATION_ERROR               | Configuration error        
                                                               |
-| 203         | START_UP_ERROR                    | Meet error while starting  
                                                               |
-| 204         | SHUT_DOWN_ERROR                   | Meet error while shutdown  
                                                               |
-| 300         | UNSUPPORTED_OPERATION             | Unsupported operation      
                                                               |
-| 301         | EXECUTE_STATEMENT_ERROR           | Execute statement error    
                                                               |
-| 302         | MULTIPLE_ERROR                    | Meet error when executing 
multiple statements                                             |
-| 303         | ILLEGAL_PARAMETER                 | Parameter is illegal       
                                                               |
-| 304         | OVERLAP_WITH_EXISTING_TASK        | Current task has some 
conflict with existing tasks                                        |
-| 305         | INTERNAL_SERVER_ERROR             | Internal server error      
                                                               |
-| 306         | DISPATCH_ERROR                    | Meet error while 
dispatching                                                              |
-| 400         | REDIRECTION_RECOMMEND             | Recommend Client 
redirection                                                              |
-| 500         | DATABASE_NOT_EXIST                | Database does not exist    
                                                               |
-| 501         | DATABASE_ALREADY_EXISTS           | Database already exist     
                                                               |
-| 502         | SERIES_OVERFLOW                   | Series number exceeds the 
threshold                                                       |
-| 503         | TIMESERIES_ALREADY_EXIST          | Timeseries already exists  
                                                               |
-| 504         | TIMESERIES_IN_BLACK_LIST          | Timeseries is being 
deleted                                                               |
-| 505         | ALIAS_ALREADY_EXIST               | Alias already exists       
                                                               |
-| 506         | PATH_ALREADY_EXIST                | Path already exists        
                                                               |
-| 507         | METADATA_ERROR                    | Meet error when dealing 
with metadata                                                     |
-| 508         | PATH_NOT_EXIST                    | Path does not exist        
                                                               |
-| 509         | ILLEGAL_PATH                      | Illegal path               
                                                               |
-| 510         | CREATE_TEMPLATE_ERROR             | Create schema template 
error                                                              |
-| 511         | DUPLICATED_TEMPLATE               | Schema template is 
duplicated                                                             |
-| 512         | UNDEFINED_TEMPLATE                | Schema template is not 
defined                                                            |
-| 513         | TEMPLATE_NOT_SET                  | Schema template is not set 
                                                               |
-| 514         | DIFFERENT_TEMPLATE                | Template is not consistent 
                                                               |
-| 515         | TEMPLATE_IS_IN_USE                | Template is in use         
                                                               |
-| 516         | TEMPLATE_INCOMPATIBLE             | Template is not compatible 
                                                               |
-| 517         | SEGMENT_NOT_FOUND                 | Segment not found          
                                                               |
-| 518         | PAGE_OUT_OF_SPACE                 | No enough space on schema 
page                                                            |
-| 519         | RECORD_DUPLICATED                 | Record is duplicated       
                                                               |
-| 520         | SEGMENT_OUT_OF_SPACE              | No enough space on schema 
segment                                                         |
-| 521         | SCHEMA_FILE_NOT_EXISTS            | SchemaFile does not exist  
                                                               |
-| 522         | OVERSIZE_RECORD                   | Size of record exceeds the 
threshold of page of SchemaFile                                |
-| 523         | SCHEMA_FILE_REDO_LOG_BROKEN       | SchemaFile redo log has 
broken                                                            |
-| 524         | TEMPLATE_NOT_ACTIVATED            | Schema template is not 
activated                                                          |
-| 526         | SCHEMA_QUOTA_EXCEEDED             | Schema usage exceeds quota 
limit                                                          |
-| 527  | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE            | Measurement 
already exists in schema template                                    | 
-| 600         | SYSTEM_READ_ONLY                  | IoTDB system is read only  
                                                               |
-| 601         | STORAGE_ENGINE_ERROR              | Storage engine related 
error                                                              |
-| 602         | STORAGE_ENGINE_NOT_READY          | The storage engine is in 
recovery, not ready fore accepting read/write operation          |
-| 603         | DATAREGION_PROCESS_ERROR          | DataRegion related error   
                                                               |
-| 604         | TSFILE_PROCESSOR_ERROR            | TsFile processor related 
error                                                            |
-| 605         | WRITE_PROCESS_ERROR               | Writing data related error 
                                                               |
-| 606         | WRITE_PROCESS_REJECT              | Writing data rejected 
error                                                               |
-| 607         | OUT_OF_TTL                        | Insertion time is less 
than TTL time bound                                                |
-| 608         | COMPACTION_ERROR                  | Meet error while merging   
                                                               |
-| 609         | ALIGNED_TIMESERIES_ERROR          | Meet error in aligned 
timeseries                                                          |
-| 610         | WAL_ERROR                         | WAL error                  
                                                               |
-| 611         | DISK_SPACE_INSUFFICIENT           | Disk space is insufficient 
                                                               |
-| 700         | SQL_PARSE_ERROR                   | Meet error while parsing 
SQL                                                              |
-| 701         | SEMANTIC_ERROR                    | SQL semantic error         
                                                               |
-| 702         | GENERATE_TIME_ZONE_ERROR          | Meet error while 
generating time zone                                                     |
-| 703         | SET_TIME_ZONE_ERROR               | Meet error while setting 
time zone                                                        |
-| 704         | QUERY_NOT_ALLOWED                 | Query statements are not 
allowed error                                                    |
-| 705         | LOGICAL_OPERATOR_ERROR            | Logical operator related 
error                                                            |
-| 706         | LOGICAL_OPTIMIZE_ERROR            | Logical optimize related 
error                                                            |
-| 707         | UNSUPPORTED_FILL_TYPE             | Unsupported fill type 
related error                                                       |
-| 708         | QUERY_PROCESS_ERROR               | Query process related 
error                                                               |
-| 709         | MPP_MEMORY_NOT_ENOUGH             | Not enough memory for task 
execution in MPP                                               |
-| 710         | CLOSE_OPERATION_ERROR             | Meet error in close 
operation                                                             |
-| 711         | TSBLOCK_SERIALIZE_ERROR           | TsBlock serialization 
error                                                               |
-| 712         | INTERNAL_REQUEST_TIME_OUT         | MPP Operation timeout      
                                                               |
-| 713         | INTERNAL_REQUEST_RETRY_ERROR      | Internal operation retry 
failed                                                           |
-| 714         | NO_SUCH_QUERY                     | Cannot find target query   
                                                               |
-| 715         | QUERY_WAS_KILLED                  | Query was killed when 
execute                                                             |
-| 800         | UNINITIALIZED_AUTH_ERROR          | Failed to initialize auth 
module                                                          |
-| 801         | WRONG_LOGIN_PASSWORD              | Username or password is 
wrong                                                             |
-| 802         | NOT_LOGIN                         | Not login                  
                                                               |
-| 803         | NO_PERMISSION                     | No permisstion to operate  
                                                               |
-| 804         | USER_NOT_EXIST                    | User not exists            
                                                               |
-| 805         | USER_ALREADY_EXIST                | User already exists        
                                                               |
-| 806         | USER_ALREADY_HAS_ROLE             | User already has target 
role                                                              |
-| 807         | USER_NOT_HAS_ROLE                 | User not has target role   
                                                               |
-| 808         | ROLE_NOT_EXIST                    | Role not exists            
                                                               |
-| 809         | ROLE_ALREADY_EXIST                | Role already exists        
                                                               |
-| 810         | ALREADY_HAS_PRIVILEGE             | Already has privilege      
                                                               |
-| 811         | NOT_HAS_PRIVILEGE                 | Not has privilege          
                                                               |
-| 812         | CLEAR_PERMISSION_CACHE_ERROR      | Failed to clear permission 
cache                                                          |
-| 813         | UNKNOWN_AUTH_PRIVILEGE            | Unknown auth privilege     
                                                               |
-| 814         | UNSUPPORTED_AUTH_OPERATION        | Unsupported auth operation 
                                                               |
-| 815         | AUTH_IO_EXCEPTION                 | IO Exception in auth 
module                                                               |
-| 900         | MIGRATE_REGION_ERROR              | Error when migrate region  
                                                               |
-| 901         | CREATE_REGION_ERROR               | Create region error        
                                                               |
-| 902         | DELETE_REGION_ERROR               | Delete region error        
                                                               |
-| 903         | PARTITION_CACHE_UPDATE_ERROR      | Update partition cache 
failed                                                             |
-| 904         | CONSENSUS_NOT_INITIALIZED         | Consensus is not 
initialized and cannot provide service                                   |
-| 905         | REGION_LEADER_CHANGE_ERROR        | Region leader migration 
failed                                                            |
-| 906         | NO_AVAILABLE_REGION_GROUP         | Cannot find an available 
region group                                                     |
-| 907         | LACK_DATA_PARTITION_ALLOCATION    | Lacked some data partition 
allocation result in the response                              |
-| 1000        | DATANODE_ALREADY_REGISTERED       | DataNode already 
registered in cluster                                                    |
-| 1001        | NO_ENOUGH_DATANODE                | The number of DataNode is 
not enough, cannot remove DataNode or create enough replication |
-| 1002        | ADD_CONFIGNODE_ERROR              | Add ConfigNode error       
                                                               |
-| 1003        | REMOVE_CONFIGNODE_ERROR           | Remove ConfigNode error    
                                                               |
-| 1004        | DATANODE_NOT_EXIST                | DataNode not exist error   
                                                               |
-| 1005        | DATANODE_STOP_ERROR               | DataNode stop error        
                                                               |
-| 1006        | REMOVE_DATANODE_ERROR             | Remove datanode failed     
                                                               |
-| 1007        | REGISTER_DATANODE_WITH_WRONG_ID   | The DataNode to be 
registered has incorrect register id                                   |
-| 1008        | CAN_NOT_CONNECT_DATANODE          | Can not connect to 
DataNode                                                               |
-| 1100        | LOAD_FILE_ERROR                   | Meet error while loading 
file                                                             |
-| 1101        | LOAD_PIECE_OF_TSFILE_ERROR        | Error when load a piece of 
TsFile when loading                                            |
-| 1102        | DESERIALIZE_PIECE_OF_TSFILE_ERROR | Error when deserialize a 
piece of TsFile                                                  |
-| 1103        | SYNC_CONNECTION_ERROR             | Sync connection error      
                                                               |
-| 1104        | SYNC_FILE_REDIRECTION_ERROR       | Sync TsFile redirection 
error                                                             |
-| 1105        | SYNC_FILE_ERROR                   | Sync TsFile error          
                                                               |
-| 1106        | CREATE_PIPE_SINK_ERROR            | Failed to create a PIPE 
sink                                                              |
-| 1107        | PIPE_ERROR                        | PIPE error                 
                                                               |
-| 1108        | PIPESERVER_ERROR                  | PIPE server error          
                                                               |
-| 1109        | VERIFY_METADATA_ERROR             | Meet error in validate 
timeseries schema                                                  |
-| 1200        | UDF_LOAD_CLASS_ERROR              | Error when loading UDF 
class                                                              |
-| 1201        | UDF_DOWNLOAD_ERROR                | DataNode cannot download 
UDF from ConfigNode                                              |
-| 1202        | CREATE_UDF_ON_DATANODE_ERROR      | Error when create UDF on 
DataNode                                                         |
-| 1203        | DROP_UDF_ON_DATANODE_ERROR        | Error when drop a UDF on 
DataNode                                                         |
-| 1300        | CREATE_TRIGGER_ERROR              | ConfigNode create trigger 
error                                                           |
-| 1301        | DROP_TRIGGER_ERROR                | ConfigNode delete Trigger 
error                                                           |
-| 1302        | TRIGGER_FIRE_ERROR                | Error when firing trigger  
                                                               |
-| 1303        | TRIGGER_LOAD_CLASS_ERROR          | Error when load class of 
trigger                                                          |
-| 1304        | TRIGGER_DOWNLOAD_ERROR            | Error when download 
trigger from ConfigNode                                               |
-| 1305        | CREATE_TRIGGER_INSTANCE_ERROR     | Error when create trigger 
instance                                                        |
-| 1306        | ACTIVE_TRIGGER_INSTANCE_ERROR     | Error when activate 
trigger instance                                                      |
-| 1307        | DROP_TRIGGER_INSTANCE_ERROR       | Error when drop trigger 
instance                                                          |
-| 1308        | UPDATE_TRIGGER_LOCATION_ERROR     | Error when move stateful 
trigger to new datanode                                          |
-| 1400        | NO_SUCH_CQ                        | CQ task does not exist     
                                                               |
-| 1401        | CQ_ALREADY_ACTIVE                 | CQ is already active       
                                                               |
-| 1402        | CQ_AlREADY_EXIST                  | CQ is already exist        
                                                               |
-| 1403        | CQ_UPDATE_LAST_EXEC_TIME_ERROR    | CQ update last execution 
time failed                                                      |
+| Status Code | Status Type                            | Meanings              
                                                                    |
+|:------------|:---------------------------------------|:------------------------------------------------------------------------------------------|
+| 200         | SUCCESS_STATUS                         |                       
                                                                    |
+| 201         | INCOMPATIBLE_VERSION                   | Incompatible version  
                                                                    |
+| 202         | CONFIGURATION_ERROR                    | Configuration error   
                                                                    |
+| 203         | START_UP_ERROR                         | Meet error while 
starting                                                                 |
+| 204         | SHUT_DOWN_ERROR                        | Meet error while 
shutdown                                                                 |
+| 300         | UNSUPPORTED_OPERATION                  | Unsupported operation 
                                                                    |
+| 301         | EXECUTE_STATEMENT_ERROR                | Execute statement 
error                                                                   |
+| 302         | MULTIPLE_ERROR                         | Meet error when 
executing multiple statements                                             |
+| 303         | ILLEGAL_PARAMETER                      | Parameter is illegal  
                                                                    |
+| 304         | OVERLAP_WITH_EXISTING_TASK             | Current task has some 
conflict with existing tasks                                        |
+| 305         | INTERNAL_SERVER_ERROR                  | Internal server error 
                                                                    |
+| 306         | DISPATCH_ERROR                         | Meet error while 
dispatching                                                              |
+| 400         | REDIRECTION_RECOMMEND                  | Recommend Client 
redirection                                                              |
+| 500         | DATABASE_NOT_EXIST                     | Database does not 
exist                                                                   |
+| 501         | DATABASE_ALREADY_EXISTS                | Database already 
exist                                                                    |
+| 502         | SERIES_OVERFLOW                        | Series number exceeds 
the threshold                                                       |
+| 503         | TIMESERIES_ALREADY_EXIST               | Timeseries already 
exists                                                                 |
+| 504         | TIMESERIES_IN_BLACK_LIST               | Timeseries is being 
deleted                                                               |
+| 505         | ALIAS_ALREADY_EXIST                    | Alias already exists  
                                                                    |
+| 506         | PATH_ALREADY_EXIST                     | Path already exists   
                                                                    |
+| 507         | METADATA_ERROR                         | Meet error when 
dealing with metadata                                                     |
+| 508         | PATH_NOT_EXIST                         | Path does not exist   
                                                                    |
+| 509         | ILLEGAL_PATH                           | Illegal path          
                                                                    |
+| 510         | CREATE_TEMPLATE_ERROR                  | Create schema 
template error                                                              |
+| 511         | DUPLICATED_TEMPLATE                    | Schema template is 
duplicated                                                             |
+| 512         | UNDEFINED_TEMPLATE                     | Schema template is 
not defined                                                            |
+| 513         | TEMPLATE_NOT_SET                       | Schema template is 
not set                                                                |
+| 514         | DIFFERENT_TEMPLATE                     | Template is not 
consistent                                                                |
+| 515         | TEMPLATE_IS_IN_USE                     | Template is in use    
                                                                    |
+| 516         | TEMPLATE_INCOMPATIBLE                  | Template is not 
compatible                                                                |
+| 517         | SEGMENT_NOT_FOUND                      | Segment not found     
                                                                    |
+| 518         | PAGE_OUT_OF_SPACE                      | No enough space on 
schema page                                                            |
+| 519         | RECORD_DUPLICATED                      | Record is duplicated  
                                                                    |
+| 520         | SEGMENT_OUT_OF_SPACE                   | No enough space on 
schema segment                                                         |
+| 521         | PB_TREE_FILE_NOT_EXISTS                | PBTreeFile does not 
exist                                                                 |
+| 522         | OVERSIZE_RECORD                        | Size of record 
exceeds the threshold of page of PBTreeFile                                |
+| 523         | PB_TREE_FILE_REDO_LOG_BROKEN           | PBTreeFile redo log 
has broken                                                            |
+| 524         | TEMPLATE_NOT_ACTIVATED                 | Schema template is 
not activated                                                          |
+| 526         | SCHEMA_QUOTA_EXCEEDED                  | Schema usage exceeds 
quota limit                                                          |
+| 527  | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE | Measurement already exists 
in schema template                                    | 
+| 600         | SYSTEM_READ_ONLY                       | IoTDB system is read 
only                                                                 |
+| 601         | STORAGE_ENGINE_ERROR                   | Storage engine 
related error                                                              |
+| 602         | STORAGE_ENGINE_NOT_READY               | The storage engine is 
in recovery, not ready for accepting read/write operation           |
+| 603         | DATAREGION_PROCESS_ERROR               | DataRegion related 
error                                                                  |
+| 604         | TSFILE_PROCESSOR_ERROR                 | TsFile processor 
related error                                                            |
+| 605         | WRITE_PROCESS_ERROR                    | Writing data related 
error                                                                |
+| 606         | WRITE_PROCESS_REJECT                   | Writing data rejected 
error                                                               |
+| 607         | OUT_OF_TTL                             | Insertion time is 
less than TTL time bound                                                |
+| 608         | COMPACTION_ERROR                       | Meet error while 
merging                                                                  |
+| 609         | ALIGNED_TIMESERIES_ERROR               | Meet error in aligned 
timeseries                                                          |
+| 610         | WAL_ERROR                              | WAL error             
                                                                    |
+| 611         | DISK_SPACE_INSUFFICIENT                | Disk space is 
insufficient                                                                |
+| 700         | SQL_PARSE_ERROR                        | Meet error while 
parsing SQL                                                              |
+| 701         | SEMANTIC_ERROR                         | SQL semantic error    
                                                                    |
+| 702         | GENERATE_TIME_ZONE_ERROR               | Meet error while 
generating time zone                                                     |
+| 703         | SET_TIME_ZONE_ERROR                    | Meet error while 
setting time zone                                                        |
+| 704         | QUERY_NOT_ALLOWED                      | Query statements are 
not allowed error                                                    |
+| 705         | LOGICAL_OPERATOR_ERROR                 | Logical operator 
related error                                                            |
+| 706         | LOGICAL_OPTIMIZE_ERROR                 | Logical optimize 
related error                                                            |
+| 707         | UNSUPPORTED_FILL_TYPE                  | Unsupported fill type 
related error                                                       |
+| 708         | QUERY_PROCESS_ERROR                    | Query process related 
error                                                               |
+| 709         | MPP_MEMORY_NOT_ENOUGH                  | Not enough memory for 
task execution in MPP                                               |
+| 710         | CLOSE_OPERATION_ERROR                  | Meet error in close 
operation                                                             |
+| 711         | TSBLOCK_SERIALIZE_ERROR                | TsBlock serialization 
error                                                               |
+| 712         | INTERNAL_REQUEST_TIME_OUT              | MPP Operation timeout 
                                                                    |
+| 713         | INTERNAL_REQUEST_RETRY_ERROR           | Internal operation 
retry failed                                                           |
+| 714         | NO_SUCH_QUERY                          | Cannot find target 
query                                                                  |
+| 715         | QUERY_WAS_KILLED                       | Query was killed when 
execute                                                             |
+| 800         | UNINITIALIZED_AUTH_ERROR               | Failed to initialize 
auth module                                                          |
+| 801         | WRONG_LOGIN_PASSWORD                   | Username or password 
is wrong                                                             |
+| 802         | NOT_LOGIN                              | Not login             
                                                                    |
+| 803         | NO_PERMISSION                          | No permission to 
operate                                                                  |
+| 804         | USER_NOT_EXIST                         | User does not exist   
                                                                    |
+| 805         | USER_ALREADY_EXIST                     | User already exists   
                                                                    |
+| 806         | USER_ALREADY_HAS_ROLE                  | User already has 
target role                                                              |
+| 807         | USER_NOT_HAS_ROLE                      | User does not have 
target role                                                             |
+| 808         | ROLE_NOT_EXIST                         | Role does not exist   
                                                                    |
+| 809         | ROLE_ALREADY_EXIST                     | Role already exists   
                                                                    |
+| 810         | ALREADY_HAS_PRIVILEGE                  | Already has privilege 
                                                                    |
+| 811         | NOT_HAS_PRIVILEGE                      | Does not have 
privilege                                                                    |
+| 812         | CLEAR_PERMISSION_CACHE_ERROR           | Failed to clear 
permission cache                                                          |
+| 813         | UNKNOWN_AUTH_PRIVILEGE                 | Unknown auth 
privilege                                                                    |
+| 814         | UNSUPPORTED_AUTH_OPERATION             | Unsupported auth 
operation                                                                |
+| 815         | AUTH_IO_EXCEPTION                      | IO Exception in auth 
module                                                               |
+| 900         | MIGRATE_REGION_ERROR                   | Error when migrate 
region                                                                 |
+| 901         | CREATE_REGION_ERROR                    | Create region error   
                                                                    |
+| 902         | DELETE_REGION_ERROR                    | Delete region error   
                                                                    |
+| 903         | PARTITION_CACHE_UPDATE_ERROR           | Update partition 
cache failed                                                             |
+| 904         | CONSENSUS_NOT_INITIALIZED              | Consensus is not 
initialized and cannot provide service                                   |
+| 905         | REGION_LEADER_CHANGE_ERROR             | Region leader 
migration failed                                                            |
+| 906         | NO_AVAILABLE_REGION_GROUP              | Cannot find an 
available region group                                                     |
+| 907         | LACK_DATA_PARTITION_ALLOCATION         | Lacked some data 
partition allocation result in the response                              |
+| 1000        | DATANODE_ALREADY_REGISTERED            | DataNode already 
registered in cluster                                                    |
+| 1001        | NO_ENOUGH_DATANODE                     | The number of 
DataNode is not enough, cannot remove DataNode or create enough replication |
+| 1002        | ADD_CONFIGNODE_ERROR                   | Add ConfigNode error  
                                                                    |
+| 1003        | REMOVE_CONFIGNODE_ERROR                | Remove ConfigNode 
error                                                                   |
+| 1004        | DATANODE_NOT_EXIST                     | DataNode not exist 
error                                                                  |
+| 1005        | DATANODE_STOP_ERROR                    | DataNode stop error   
                                                                    |
+| 1006        | REMOVE_DATANODE_ERROR                  | Remove datanode 
failed                                                                    |
+| 1007        | REGISTER_DATANODE_WITH_WRONG_ID        | The DataNode to be 
registered has incorrect register id                                   |
+| 1008        | CAN_NOT_CONNECT_DATANODE               | Can not connect to 
DataNode                                                               |
+| 1100        | LOAD_FILE_ERROR                        | Meet error while 
loading file                                                             |
+| 1101        | LOAD_PIECE_OF_TSFILE_ERROR             | Error when load a 
piece of TsFile when loading                                            |
+| 1102        | DESERIALIZE_PIECE_OF_TSFILE_ERROR      | Error when 
deserialize a piece of TsFile                                                  |
+| 1103        | SYNC_CONNECTION_ERROR                  | Sync connection error 
                                                                    |
+| 1104        | SYNC_FILE_REDIRECTION_ERROR            | Sync TsFile 
redirection error                                                             |
+| 1105        | SYNC_FILE_ERROR                        | Sync TsFile error     
                                                                    |
+| 1106        | CREATE_PIPE_SINK_ERROR                 | Failed to create a 
PIPE sink                                                              |
+| 1107        | PIPE_ERROR                             | PIPE error            
                                                                    |
+| 1108        | PIPESERVER_ERROR                       | PIPE server error     
                                                                    |
+| 1109        | VERIFY_METADATA_ERROR                  | Meet error in 
validate timeseries schema                                                  |
+| 1200        | UDF_LOAD_CLASS_ERROR                   | Error when loading 
UDF class                                                              |
+| 1201        | UDF_DOWNLOAD_ERROR                     | DataNode cannot 
download UDF from ConfigNode                                              |
+| 1202        | CREATE_UDF_ON_DATANODE_ERROR           | Error when create UDF 
on DataNode                                                         |
+| 1203        | DROP_UDF_ON_DATANODE_ERROR             | Error when drop a UDF 
on DataNode                                                         |
+| 1300        | CREATE_TRIGGER_ERROR                   | ConfigNode create 
trigger error                                                           |
+| 1301        | DROP_TRIGGER_ERROR                     | ConfigNode delete 
Trigger error                                                           |
+| 1302        | TRIGGER_FIRE_ERROR                     | Error when firing 
trigger                                                                 |
+| 1303        | TRIGGER_LOAD_CLASS_ERROR               | Error when load class 
of trigger                                                          |
+| 1304        | TRIGGER_DOWNLOAD_ERROR                 | Error when download 
trigger from ConfigNode                                               |
+| 1305        | CREATE_TRIGGER_INSTANCE_ERROR          | Error when create 
trigger instance                                                        |
+| 1306        | ACTIVE_TRIGGER_INSTANCE_ERROR          | Error when activate 
trigger instance                                                      |
+| 1307        | DROP_TRIGGER_INSTANCE_ERROR            | Error when drop 
trigger instance                                                          |
+| 1308        | UPDATE_TRIGGER_LOCATION_ERROR          | Error when move 
stateful trigger to new datanode                                          |
+| 1400        | NO_SUCH_CQ                             | CQ task does not 
exist                                                                    |
+| 1401        | CQ_ALREADY_ACTIVE                      | CQ is already active  
                                                                    |
+| 1402        | CQ_AlREADY_EXIST                       | CQ already exists     
                                                                    |
+| 1403        | CQ_UPDATE_LAST_EXEC_TIME_ERROR         | CQ update last 
execution time failed                                                      |
 
 > All exceptions are refactored in the latest version by extracting uniform 
 > message into exception classes. Different error codes are added to all 
 > exceptions. When an exception is caught and a higher-level exception is 
 > thrown, the error code will keep and pass so that users will know the 
 > detailed error reason.
 A base exception class "ProcessException" is also added to be extended by all 
exceptions.
diff --git a/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md 
b/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
index 046132c5a36..a85a930ef34 100644
--- a/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
+++ b/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md
@@ -40,7 +40,7 @@ server 的 lib 的文件夹中。
 ####################
 ### Schema Engine Configuration
 ####################
-# Choose the mode of schema engine. The value could be Memory,Schema_File and 
Rocksdb_based. If the provided value doesn't match any pre-defined value, 
Memory mode will be used as default.
+# Choose the mode of schema engine. The value could be Memory,PB_Tree and 
Rocksdb_based. If the provided value doesn't match any pre-defined value, 
Memory mode will be used as default.
 # Datatype: string
 schema_engine_mode=Rocksdb_based
 ```
diff --git a/docs/zh/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md 
b/docs/zh/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
index c3ef7e70e03..6dd84407add 100644
--- a/docs/zh/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
+++ b/docs/zh/UserGuide/Maintenance-Tools/SchemaFileSketch-Tool.md
@@ -19,17 +19,17 @@
 
 -->
 
-## SchemaFile 解析工具
+## PBTreeFile 解析工具
 
-自 0.14.0 版本起,IoTDB 将每个 database 下序列的元数据存储为 schema_file.pst 文件。
+自 1.1 版本起,IoTDB 将每个 database 下序列的元数据存储为 pb_tree.pst 文件。
 
-如果需要将该文件转为便于阅读的的格式,可以使用本工具来解析指定 schema_file.pst 。
+如果需要将该文件转为便于阅读的格式,可以使用本工具来解析指定 pb_tree.pst 。
 
 ### 使用方式
 
 Linux/MacOS
-> ./print-schema-file.sh -f your/path/to/schema_file.pst -o 
/your/path/to/sketch.txt
+> ./print-pb-tree-file.sh -f your/path/to/pb_tree.pst -o 
/your/path/to/sketch.txt
 
 Windows
 
-> ./print-schema-file.bat -f your/path/to/schema_file.pst -o 
/your/path/to/sketch.txt
+> ./print-pb-tree-file.bat -f your/path/to/pb_tree.pst -o 
/your/path/to/sketch.txt
diff --git a/docs/zh/UserGuide/Reference/Common-Config-Manual.md 
b/docs/zh/UserGuide/Reference/Common-Config-Manual.md
index 6283fa429a3..eb657775fab 100644
--- a/docs/zh/UserGuide/Reference/Common-Config-Manual.md
+++ b/docs/zh/UserGuide/Reference/Common-Config-Manual.md
@@ -368,7 +368,7 @@ IoTDB ConfigNode 和 DataNode 的公共配置参数位于 `conf` 目录下。
 
 |名字| schema\_engine\_mode |
 |:---:|:---|
-|描述| 元数据引擎的运行模式,支持 Memory 和 Schema_File两种模式;Schema_File 
模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。|
+|描述| 元数据引擎的运行模式,支持 Memory 和 PB_Tree;PB_Tree 
模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。|
 |类型| string |
 |默认值| Memory |
 |改后生效方式|仅允许在第一次启动服务前修改|
diff --git a/docs/zh/UserGuide/Reference/Status-Codes.md 
b/docs/zh/UserGuide/Reference/Status-Codes.md
index c73fc450c98..535817d0b9b 100644
--- a/docs/zh/UserGuide/Reference/Status-Codes.md
+++ b/docs/zh/UserGuide/Reference/Status-Codes.md
@@ -43,136 +43,136 @@ try {
 
 这里是状态码和相对应信息的列表:
 
-| 状态码  | 状态类型                               | 状态信息                      |
-|:-----|:-----------------------------------|:--------------------------|
-| 200  | SUCCESS_STATUS                     | 成功状态                      |
-| 201  | INCOMPATIBLE_VERSION               | 版本不兼容                     |
-| 202  | CONFIGURATION_ERROR                | 配置文件有错误项                  |
-| 203  | START_UP_ERROR                     | 启动错误                      |
-| 204  | SHUT_DOWN_ERROR                    | 关机错误                      |
-| 300  | UNSUPPORTED_OPERATION              | 不支持的操作                    |
-| 301  | EXECUTE_STATEMENT_ERROR            | 执行语句错误                    |
-| 302  | MULTIPLE_ERROR                     | 多行语句执行错误                  |
-| 303  | ILLEGAL_PARAMETER                  | 参数错误                      |
-| 304  | OVERLAP_WITH_EXISTING_TASK         | 与正在执行的其他操作冲突              |
-| 305  | INTERNAL_SERVER_ERROR              | 服务器内部错误                   |
-| 306  | DISPATCH_ERROR                     | 分发错误                      |
-| 400  | REDIRECTION_RECOMMEND              | 推荐客户端重定向                  |
-| 500  | DATABASE_NOT_EXIST                 | 数据库不存在                    |
-| 501  | DATABASE_ALREADY_EXISTS            | 数据库已存在                    |
-| 502  | SERIES_OVERFLOW                    | 序列数量超过阈值                  |
-| 503  | TIMESERIES_ALREADY_EXIST           | 时间序列已存在                   |
-| 504  | TIMESERIES_IN_BLACK_LIST           | 时间序列正在删除                  |
-| 505  | ALIAS_ALREADY_EXIST                | 路径别名已经存在                  |
-| 506  | PATH_ALREADY_EXIST                 | 路径已经存在                    |
-| 507  | METADATA_ERROR                     | 处理元数据错误                   |
-| 508  | PATH_NOT_EXIST                     | 路径不存在                     |
-| 509  | ILLEGAL_PATH                       | 路径不合法                     |
-| 510  | CREATE_TEMPLATE_ERROR              | 创建物理量模板失败                 |
-| 511  | DUPLICATED_TEMPLATE                | 元数据模板重复                   |
-| 512  | UNDEFINED_TEMPLATE                 | 元数据模板未定义                  |
-| 513  | TEMPLATE_NOT_SET                   | 元数据模板未设置                  |
-| 514  | DIFFERENT_TEMPLATE                 | 元数据模板不一致                  |
-| 515  | TEMPLATE_IS_IN_USE                 | 元数据模板正在使用                 |
-| 516  | TEMPLATE_INCOMPATIBLE              | 元数据模板不兼容                  |
-| 517  | SEGMENT_NOT_FOUND                  | 未找到 Segment               |
-| 518  | PAGE_OUT_OF_SPACE                  | SchemaFile 中 Page 空间不够    |
-| 519  | RECORD_DUPLICATED                  | 记录重复                      |
-| 520  | SEGMENT_OUT_OF_SPACE               | SchemaFile 中 segment 空间不够 |
-| 521  | SCHEMA_FILE_NOT_EXISTS             | SchemaFile 不存在            |
-| 522  | OVERSIZE_RECORD                    | 记录大小超过元数据文件页面大小           |
-| 523  | SCHEMA_FILE_REDO_LOG_BROKEN        | SchemaFile 的 redo 日志损坏    |
-| 524  | TEMPLATE_NOT_ACTIVATED             | 元数据模板未激活                  |
-| 526  | SCHEMA_QUOTA_EXCEEDED              | 集群元数据超过配额上限               |
-| 527  | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE            | 元数据模板中已存在物理量      
            |
-| 600  | SYSTEM_READ_ONLY                   | IoTDB 系统只读                |
-| 601  | STORAGE_ENGINE_ERROR               | 存储引擎相关错误                  |
-| 602  | STORAGE_ENGINE_NOT_READY           | 存储引擎还在恢复中,还不能接受读写操作       |
-| 603  | DATAREGION_PROCESS_ERROR           | DataRegion 相关错误           |
-| 604  | TSFILE_PROCESSOR_ERROR             | TsFile 处理器相关错误            |
-| 605  | WRITE_PROCESS_ERROR                | 写入相关错误                    |
-| 606  | WRITE_PROCESS_REJECT               | 写入拒绝错误                    |
-| 607  | OUT_OF_TTL                         | 插入时间少于 TTL 时间边界           |
-| 608  | COMPACTION_ERROR                   | 合并错误                      |
-| 609  | ALIGNED_TIMESERIES_ERROR           | 对齐时间序列错误                  |
-| 610  | WAL_ERROR                          | WAL 异常                    |
-| 611  | DISK_SPACE_INSUFFICIENT            | 磁盘空间不足                    |
-| 700  | SQL_PARSE_ERROR                    | SQL 语句分析错误                |
-| 701  | SEMANTIC_ERROR                     | SQL 语义错误                  |
-| 702  | GENERATE_TIME_ZONE_ERROR           | 生成时区错误                    |
-| 703  | SET_TIME_ZONE_ERROR                | 设置时区错误                    |
-| 704  | QUERY_NOT_ALLOWED                  | 查询语句不允许                   |
-| 705  | LOGICAL_OPERATOR_ERROR             | 逻辑符相关错误                   |
-| 706  | LOGICAL_OPTIMIZE_ERROR             | 逻辑优化相关错误                  |
-| 707  | UNSUPPORTED_FILL_TYPE              | 不支持的填充类型                  |
-| 708  | QUERY_PROCESS_ERROR                | 查询处理相关错误                  |
-| 709  | MPP_MEMORY_NOT_ENOUGH              | MPP 框架中任务执行内存不足           |
-| 710  | CLOSE_OPERATION_ERROR              | 关闭操作错误                    |
-| 711  | TSBLOCK_SERIALIZE_ERROR            | TsBlock 序列化错误             |
-| 712  | INTERNAL_REQUEST_TIME_OUT          | MPP 操作超时                  |
-| 713  | INTERNAL_REQUEST_RETRY_ERROR       | 内部操作重试失败                  |
-| 714  | NO_SUCH_QUERY                      | 查询不存在                     |
-| 715  | QUERY_WAS_KILLED                   | 查询执行时被终止                  |
-| 800  | UNINITIALIZED_AUTH_ERROR           | 授权模块未初始化                  |
-| 801  | WRONG_LOGIN_PASSWORD               | 用户名或密码错误                  |
-| 802  | NOT_LOGIN                          | 没有登录                      |
-| 803  | NO_PERMISSION                      | 没有操作权限                    |
-| 804  | USER_NOT_EXIST                     | 用户不存在                     |
-| 805  | USER_ALREADY_EXIST                 | 用户已存在                     |
-| 806  | USER_ALREADY_HAS_ROLE              | 用户拥有对应角色                  |
-| 807  | USER_NOT_HAS_ROLE                  | 用户未拥有对应角色                 |
-| 808  | ROLE_NOT_EXIST                     | 角色不存在                     |
-| 809  | ROLE_ALREADY_EXIST                 | 角色已存在                     |
-| 810  | ALREADY_HAS_PRIVILEGE              | 已拥有对应权限                   |
-| 811  | NOT_HAS_PRIVILEGE                  | 未拥有对应权限                   |
-| 812  | CLEAR_PERMISSION_CACHE_ERROR       | 清空权限缓存失败                  |
-| 813  | UNKNOWN_AUTH_PRIVILEGE             | 未知权限                      |
-| 814  | UNSUPPORTED_AUTH_OPERATION         | 不支持的权限操作                  |
-| 815  | AUTH_IO_EXCEPTION                  | 权限模块IO异常                  |
-| 900  | MIGRATE_REGION_ERROR               | Region 迁移失败               |
-| 901  | CREATE_REGION_ERROR                | 创建 region 失败              |
-| 902  | DELETE_REGION_ERROR                | 删除 region 失败              |
-| 903  | PARTITION_CACHE_UPDATE_ERROR       | 更新分区缓存失败                  |
-| 904  | CONSENSUS_NOT_INITIALIZED          | 共识层未初始化,不能提供服务            |
-| 905  | REGION_LEADER_CHANGE_ERROR         | Region leader 迁移失败        |
-| 906  | NO_AVAILABLE_REGION_GROUP          | 无法找到可用的 Region 副本组        |
-| 907  | LACK_DATA_PARTITION_ALLOCATION     | 调用创建数据分区方法的返回结果里缺少信息      |
-| 1000 | DATANODE_ALREADY_REGISTERED        | DataNode 在集群中已经注册         |
-| 1001 | NO_ENOUGH_DATANODE                 | DataNode 数量不足,无法移除节点或创建副本 |
-| 1002 | ADD_CONFIGNODE_ERROR               | 新增 ConfigNode 失败          |
-| 1003 | REMOVE_CONFIGNODE_ERROR            | 移除 ConfigNode 失败          |
-| 1004 | DATANODE_NOT_EXIST                 | 此 DataNode 不存在            |
-| 1005 | DATANODE_STOP_ERROR                | DataNode 关闭失败             |
-| 1006 | REMOVE_DATANODE_ERROR              | 移除 datanode 失败            |
-| 1007 | REGISTER_DATANODE_WITH_WRONG_ID    | 注册的 DataNode 中有错误的注册id    |
-| 1008 | CAN_NOT_CONNECT_DATANODE           | 连接 DataNode 失败            |
-| 1100 | LOAD_FILE_ERROR                    | 加载文件错误                    |
-| 1101 | LOAD_PIECE_OF_TSFILE_ERROR         | 加载 TsFile 片段异常            |
-| 1102 | DESERIALIZE_PIECE_OF_TSFILE_ERROR  | 反序列化 TsFile 片段异常          |
-| 1103 | SYNC_CONNECTION_ERROR              | 同步连接错误                    |
-| 1104 | SYNC_FILE_REDIRECTION_ERROR        | 同步文件时重定向异常                |
-| 1105 | SYNC_FILE_ERROR                    | 同步文件异常                    |
-| 1106 | CREATE_PIPE_SINK_ERROR             | 创建 PIPE Sink 失败           |
-| 1107 | PIPE_ERROR                         | PIPE 异常                   |
-| 1108 | PIPESERVER_ERROR                   | PIPE server 异常            |
-| 1109 | VERIFY_METADATA_ERROR              | 校验元数据失败                   |
-| 1200 | UDF_LOAD_CLASS_ERROR               | UDF 加载类异常                 |
-| 1201 | UDF_DOWNLOAD_ERROR                 | 无法从 ConfigNode 下载 UDF     |
-| 1202 | CREATE_UDF_ON_DATANODE_ERROR       | 在 DataNode 创建 UDF 失败      |
-| 1203 | DROP_UDF_ON_DATANODE_ERROR         | 在 DataNode 卸载 UDF 失败      |
-| 1300 | CREATE_TRIGGER_ERROR               | ConfigNode 创建 Trigger 失败  |
-| 1301 | DROP_TRIGGER_ERROR                 | ConfigNode 删除 Trigger 失败  |
-| 1302 | TRIGGER_FIRE_ERROR                 | 触发器执行错误                   |
-| 1303 | TRIGGER_LOAD_CLASS_ERROR           | 触发器加载类异常                  |
-| 1304 | TRIGGER_DOWNLOAD_ERROR             | 从 ConfigNode 下载触发器异常      |
-| 1305 | CREATE_TRIGGER_INSTANCE_ERROR      | 创建触发器实例异常                 |
-| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR      | 激活触发器实例异常                 |
-| 1307 | DROP_TRIGGER_INSTANCE_ERROR        | 删除触发器实例异常                 |
-| 1308 | UPDATE_TRIGGER_LOCATION_ERROR      | 更新有状态的触发器所在 DataNode 异常   |
-| 1400 | NO_SUCH_CQ                         | CQ 任务不存在                  |
-| 1401 | CQ_ALREADY_ACTIVE                  | CQ 任务已激活                  |
-| 1402 | CQ_AlREADY_EXIST                   | CQ 任务已存在                  |
-| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR     | CQ 更新上一次执行时间失败            |
+| 状态码  | 状态类型                                   | 状态信息                      |
+|:-----|:---------------------------------------|:--------------------------|
+| 200  | SUCCESS_STATUS                         | 成功状态                      |
+| 201  | INCOMPATIBLE_VERSION                   | 版本不兼容                     |
+| 202  | CONFIGURATION_ERROR                    | 配置文件有错误项                  |
+| 203  | START_UP_ERROR                         | 启动错误                      |
+| 204  | SHUT_DOWN_ERROR                        | 关机错误                      |
+| 300  | UNSUPPORTED_OPERATION                  | 不支持的操作                    |
+| 301  | EXECUTE_STATEMENT_ERROR                | 执行语句错误                    |
+| 302  | MULTIPLE_ERROR                         | 多行语句执行错误                  |
+| 303  | ILLEGAL_PARAMETER                      | 参数错误                      |
+| 304  | OVERLAP_WITH_EXISTING_TASK             | 与正在执行的其他操作冲突              |
+| 305  | INTERNAL_SERVER_ERROR                  | 服务器内部错误                   |
+| 306  | DISPATCH_ERROR                         | 分发错误                      |
+| 400  | REDIRECTION_RECOMMEND                  | 推荐客户端重定向                  |
+| 500  | DATABASE_NOT_EXIST                     | 数据库不存在                    |
+| 501  | DATABASE_ALREADY_EXISTS                | 数据库已存在                    |
+| 502  | SERIES_OVERFLOW                        | 序列数量超过阈值                  |
+| 503  | TIMESERIES_ALREADY_EXIST               | 时间序列已存在                   |
+| 504  | TIMESERIES_IN_BLACK_LIST               | 时间序列正在删除                  |
+| 505  | ALIAS_ALREADY_EXIST                    | 路径别名已经存在                  |
+| 506  | PATH_ALREADY_EXIST                     | 路径已经存在                    |
+| 507  | METADATA_ERROR                         | 处理元数据错误                   |
+| 508  | PATH_NOT_EXIST                         | 路径不存在                     |
+| 509  | ILLEGAL_PATH                           | 路径不合法                     |
+| 510  | CREATE_TEMPLATE_ERROR                  | 创建物理量模板失败                 |
+| 511  | DUPLICATED_TEMPLATE                    | 元数据模板重复                   |
+| 512  | UNDEFINED_TEMPLATE                     | 元数据模板未定义                  |
+| 513  | TEMPLATE_NOT_SET                       | 元数据模板未设置                  |
+| 514  | DIFFERENT_TEMPLATE                     | 元数据模板不一致                  |
+| 515  | TEMPLATE_IS_IN_USE                     | 元数据模板正在使用                 |
+| 516  | TEMPLATE_INCOMPATIBLE                  | 元数据模板不兼容                  |
+| 517  | SEGMENT_NOT_FOUND                      | 未找到 Segment               |
+| 518  | PAGE_OUT_OF_SPACE                      | PBTreeFile 中 Page 空间不够    |
+| 519  | RECORD_DUPLICATED                      | 记录重复                      |
+| 520  | SEGMENT_OUT_OF_SPACE                   | PBTreeFile 中 segment 空间不够 |
+| 521  | PB_TREE_FILE_NOT_EXISTS                | PBTreeFile 不存在            |
+| 522  | OVERSIZE_RECORD                        | 记录大小超过元数据文件页面大小           |
+| 523  | PB_TREE_FILE_REDO_LOG_BROKEN           | PBTreeFile 的 redo 日志损坏    |
+| 524  | TEMPLATE_NOT_ACTIVATED                 | 元数据模板未激活                  |
+| 526  | SCHEMA_QUOTA_EXCEEDED                  | 集群元数据超过配额上限               |
+| 527  | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE | 元数据模板中已存在物理量              |
+| 600  | SYSTEM_READ_ONLY                       | IoTDB 系统只读                |
+| 601  | STORAGE_ENGINE_ERROR                   | 存储引擎相关错误                  |
+| 602  | STORAGE_ENGINE_NOT_READY               | 存储引擎还在恢复中,还不能接受读写操作       |
+| 603  | DATAREGION_PROCESS_ERROR               | DataRegion 相关错误           |
+| 604  | TSFILE_PROCESSOR_ERROR                 | TsFile 处理器相关错误            |
+| 605  | WRITE_PROCESS_ERROR                    | 写入相关错误                    |
+| 606  | WRITE_PROCESS_REJECT                   | 写入拒绝错误                    |
+| 607  | OUT_OF_TTL                             | 插入时间少于 TTL 时间边界           |
+| 608  | COMPACTION_ERROR                       | 合并错误                      |
+| 609  | ALIGNED_TIMESERIES_ERROR               | 对齐时间序列错误                  |
+| 610  | WAL_ERROR                              | WAL 异常                    |
+| 611  | DISK_SPACE_INSUFFICIENT                | 磁盘空间不足                    |
+| 700  | SQL_PARSE_ERROR                        | SQL 语句分析错误                |
+| 701  | SEMANTIC_ERROR                         | SQL 语义错误                  |
+| 702  | GENERATE_TIME_ZONE_ERROR               | 生成时区错误                    |
+| 703  | SET_TIME_ZONE_ERROR                    | 设置时区错误                    |
+| 704  | QUERY_NOT_ALLOWED                      | 查询语句不允许                   |
+| 705  | LOGICAL_OPERATOR_ERROR                 | 逻辑符相关错误                   |
+| 706  | LOGICAL_OPTIMIZE_ERROR                 | 逻辑优化相关错误                  |
+| 707  | UNSUPPORTED_FILL_TYPE                  | 不支持的填充类型                  |
+| 708  | QUERY_PROCESS_ERROR                    | 查询处理相关错误                  |
+| 709  | MPP_MEMORY_NOT_ENOUGH                  | MPP 框架中任务执行内存不足           |
+| 710  | CLOSE_OPERATION_ERROR                  | 关闭操作错误                    |
+| 711  | TSBLOCK_SERIALIZE_ERROR                | TsBlock 序列化错误             |
+| 712  | INTERNAL_REQUEST_TIME_OUT              | MPP 操作超时                  |
+| 713  | INTERNAL_REQUEST_RETRY_ERROR           | 内部操作重试失败                  |
+| 714  | NO_SUCH_QUERY                          | 查询不存在                     |
+| 715  | QUERY_WAS_KILLED                       | 查询执行时被终止                  |
+| 800  | UNINITIALIZED_AUTH_ERROR               | 授权模块未初始化                  |
+| 801  | WRONG_LOGIN_PASSWORD                   | 用户名或密码错误                  |
+| 802  | NOT_LOGIN                              | 没有登录                      |
+| 803  | NO_PERMISSION                          | 没有操作权限                    |
+| 804  | USER_NOT_EXIST                         | 用户不存在                     |
+| 805  | USER_ALREADY_EXIST                     | 用户已存在                     |
+| 806  | USER_ALREADY_HAS_ROLE                  | 用户拥有对应角色                  |
+| 807  | USER_NOT_HAS_ROLE                      | 用户未拥有对应角色                 |
+| 808  | ROLE_NOT_EXIST                         | 角色不存在                     |
+| 809  | ROLE_ALREADY_EXIST                     | 角色已存在                     |
+| 810  | ALREADY_HAS_PRIVILEGE                  | 已拥有对应权限                   |
+| 811  | NOT_HAS_PRIVILEGE                      | 未拥有对应权限                   |
+| 812  | CLEAR_PERMISSION_CACHE_ERROR           | 清空权限缓存失败                  |
+| 813  | UNKNOWN_AUTH_PRIVILEGE                 | 未知权限                      |
+| 814  | UNSUPPORTED_AUTH_OPERATION             | 不支持的权限操作                  |
+| 815  | AUTH_IO_EXCEPTION                      | 权限模块IO异常                  |
+| 900  | MIGRATE_REGION_ERROR                   | Region 迁移失败               |
+| 901  | CREATE_REGION_ERROR                    | 创建 region 失败              |
+| 902  | DELETE_REGION_ERROR                    | 删除 region 失败              |
+| 903  | PARTITION_CACHE_UPDATE_ERROR           | 更新分区缓存失败                  |
+| 904  | CONSENSUS_NOT_INITIALIZED              | 共识层未初始化,不能提供服务            |
+| 905  | REGION_LEADER_CHANGE_ERROR             | Region leader 迁移失败        |
+| 906  | NO_AVAILABLE_REGION_GROUP              | 无法找到可用的 Region 副本组        |
+| 907  | LACK_DATA_PARTITION_ALLOCATION         | 调用创建数据分区方法的返回结果里缺少信息      |
+| 1000 | DATANODE_ALREADY_REGISTERED            | DataNode 在集群中已经注册         |
+| 1001 | NO_ENOUGH_DATANODE                     | DataNode 数量不足,无法移除节点或创建副本 |
+| 1002 | ADD_CONFIGNODE_ERROR                   | 新增 ConfigNode 失败          |
+| 1003 | REMOVE_CONFIGNODE_ERROR                | 移除 ConfigNode 失败          |
+| 1004 | DATANODE_NOT_EXIST                     | 此 DataNode 不存在            |
+| 1005 | DATANODE_STOP_ERROR                    | DataNode 关闭失败             |
+| 1006 | REMOVE_DATANODE_ERROR                  | 移除 datanode 失败            |
+| 1007 | REGISTER_DATANODE_WITH_WRONG_ID        | 注册的 DataNode 中有错误的注册id    |
+| 1008 | CAN_NOT_CONNECT_DATANODE               | 连接 DataNode 失败            |
+| 1100 | LOAD_FILE_ERROR                        | 加载文件错误                    |
+| 1101 | LOAD_PIECE_OF_TSFILE_ERROR             | 加载 TsFile 片段异常            |
+| 1102 | DESERIALIZE_PIECE_OF_TSFILE_ERROR      | 反序列化 TsFile 片段异常          |
+| 1103 | SYNC_CONNECTION_ERROR                  | 同步连接错误                    |
+| 1104 | SYNC_FILE_REDIRECTION_ERROR            | 同步文件时重定向异常                |
+| 1105 | SYNC_FILE_ERROR                        | 同步文件异常                    |
+| 1106 | CREATE_PIPE_SINK_ERROR                 | 创建 PIPE Sink 失败           |
+| 1107 | PIPE_ERROR                             | PIPE 异常                   |
+| 1108 | PIPESERVER_ERROR                       | PIPE server 异常            |
+| 1109 | VERIFY_METADATA_ERROR                  | 校验元数据失败                   |
+| 1200 | UDF_LOAD_CLASS_ERROR                   | UDF 加载类异常                 |
+| 1201 | UDF_DOWNLOAD_ERROR                     | 无法从 ConfigNode 下载 UDF     |
+| 1202 | CREATE_UDF_ON_DATANODE_ERROR           | 在 DataNode 创建 UDF 失败      |
+| 1203 | DROP_UDF_ON_DATANODE_ERROR             | 在 DataNode 卸载 UDF 失败      |
+| 1300 | CREATE_TRIGGER_ERROR                   | ConfigNode 创建 Trigger 失败  |
+| 1301 | DROP_TRIGGER_ERROR                     | ConfigNode 删除 Trigger 失败  |
+| 1302 | TRIGGER_FIRE_ERROR                     | 触发器执行错误                   |
+| 1303 | TRIGGER_LOAD_CLASS_ERROR               | 触发器加载类异常                  |
+| 1304 | TRIGGER_DOWNLOAD_ERROR                 | 从 ConfigNode 下载触发器异常      |
+| 1305 | CREATE_TRIGGER_INSTANCE_ERROR          | 创建触发器实例异常                 |
+| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR          | 激活触发器实例异常                 |
+| 1307 | DROP_TRIGGER_INSTANCE_ERROR            | 删除触发器实例异常                 |
+| 1308 | UPDATE_TRIGGER_LOCATION_ERROR          | 更新有状态的触发器所在 DataNode 异常   |
+| 1400 | NO_SUCH_CQ                             | CQ 任务不存在                  |
+| 1401 | CQ_ALREADY_ACTIVE                      | CQ 任务已激活                  |
+| 1402 | CQ_AlREADY_EXIST                       | CQ 任务已存在                  |
+| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR         | CQ 更新上一次执行时间失败            |
 
 > 在最新版本中,我们重构了 IoTDB 
 > 的异常类。通过将错误信息统一提取到异常类中,并为所有异常添加不同的错误代码,从而当捕获到异常并引发更高级别的异常时,错误代码将保留并传递,以便用户了解详细的错误原因。
 除此之外,我们添加了一个基础异常类“ProcessException”,由所有异常扩展。
diff --git 
a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBSortedShowTimeseriesIT.java
 
b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBSortedShowTimeseriesIT.java
index 3af3d7f1ccb..e5426134647 100644
--- 
a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBSortedShowTimeseriesIT.java
+++ 
b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBSortedShowTimeseriesIT.java
@@ -101,7 +101,7 @@ public class IoTDBSortedShowTimeseriesIT extends 
AbstractSchemaIT {
   @Parameterized.BeforeParam
   public static void before() throws Exception {
     SchemaTestMode schemaTestMode = setUpEnvironment();
-    if (schemaTestMode.equals(SchemaTestMode.SchemaFile)) {
+    if (schemaTestMode.equals(SchemaTestMode.PBTree)) {
       allocateMemoryForSchemaRegion(10000);
     }
     EnvFactory.getEnv().initClusterEnvironment();
diff --git 
a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBTagIT.java 
b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBTagIT.java
index 561bf4915c5..7a6d59d29fe 100644
--- 
a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBTagIT.java
+++ 
b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBTagIT.java
@@ -53,7 +53,7 @@ public class IoTDBTagIT extends AbstractSchemaIT {
   @Parameterized.BeforeParam
   public static void before() throws Exception {
     SchemaTestMode schemaTestMode = setUpEnvironment();
-    if (schemaTestMode.equals(SchemaTestMode.SchemaFile)) {
+    if (schemaTestMode.equals(SchemaTestMode.PBTree)) {
       allocateMemoryForSchemaRegion(10000);
     }
     EnvFactory.getEnv().initClusterEnvironment();
diff --git 
a/integration-test/src/test/java/org/apache/iotdb/util/AbstractSchemaIT.java 
b/integration-test/src/test/java/org/apache/iotdb/util/AbstractSchemaIT.java
index 618cdc1dc14..78a588fa8ca 100644
--- a/integration-test/src/test/java/org/apache/iotdb/util/AbstractSchemaIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/util/AbstractSchemaIT.java
@@ -36,8 +36,8 @@ import java.util.List;
 
 /**
  * This class define multiple modes for schema engine. All IT class extends 
AbstractSchemaIT will be
- * run in both Memory and Schema_File modes. In Schema_File mode, there are 
three kinds of test
- * environment: full memory, partial memory and non memory.
+ * run in both Memory and PB_Tree modes. In PB_Tree mode, there are three 
kinds of test environment:
+ * full memory, partial memory and non memory.
  *
  * <p>Notice that, all IT class extends AbstractSchemaIT need to call {@link
  * AbstractSchemaIT#setUpEnvironment} before test env initialization and call 
{@link
@@ -51,13 +51,13 @@ public abstract class AbstractSchemaIT {
   protected SchemaTestMode schemaTestMode;
 
   protected static final List<SchemaTestMode> schemaTestModes =
-      Arrays.asList(SchemaTestMode.Memory, SchemaTestMode.SchemaFile);
+      Arrays.asList(SchemaTestMode.Memory, SchemaTestMode.PBTree);
 
   private static int mode = 0;
 
   @Parameterized.Parameters(name = "SchemaEngineMode={0}")
   public static Iterable<SchemaTestMode> data() {
-    return Arrays.asList(SchemaTestMode.Memory, SchemaTestMode.SchemaFile);
+    return Arrays.asList(SchemaTestMode.Memory, SchemaTestMode.PBTree);
   }
 
   public AbstractSchemaIT(SchemaTestMode schemaTestMode) {
@@ -75,8 +75,8 @@ public abstract class AbstractSchemaIT {
       case Memory:
         
EnvFactory.getEnv().getConfig().getCommonConfig().setSchemaEngineMode("Memory");
         break;
-      case SchemaFile:
-        
EnvFactory.getEnv().getConfig().getCommonConfig().setSchemaEngineMode("Schema_File");
+      case PBTree:
+        
EnvFactory.getEnv().getConfig().getCommonConfig().setSchemaEngineMode("PB_Tree");
         allocateMemoryForSchemaRegion(4000);
         break;
     }
@@ -122,6 +122,6 @@ public abstract class AbstractSchemaIT {
 
   protected enum SchemaTestMode {
     Memory,
-    SchemaFile
+    PBTree
   }
 }
diff --git a/iotdb-client/client-cpp/src/main/Session.h 
b/iotdb-client/client-cpp/src/main/Session.h
index d57aad88655..9fcf080cbd8 100644
--- a/iotdb-client/client-cpp/src/main/Session.h
+++ b/iotdb-client/client-cpp/src/main/Session.h
@@ -229,9 +229,9 @@ namespace TSStatusCode {
         PAGE_OUT_OF_SPACE = 518,
         RECORD_DUPLICATED=519,
         SEGMENT_OUT_OF_SPACE = 520,
-        SCHEMA_FILE_NOT_EXISTS = 521,
+        PB_TREE_FILE_NOT_EXISTS = 521,
         OVERSIZE_RECORD = 522,
-        SCHEMA_FILE_REDO_LOG_BROKEN = 523,
+        PB_TREE_FILE_REDO_LOG_BROKEN = 523,
         TEMPLATE_NOT_ACTIVATED = 524,
 
         // Storage Engine
diff --git 
a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java 
b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
index 56bfd4a78aa..cb2dbd24857 100644
--- 
a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
+++ 
b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
@@ -72,9 +72,9 @@ public enum TSStatusCode {
   PAGE_OUT_OF_SPACE(518),
   RECORD_DUPLICATED(519),
   SEGMENT_OUT_OF_SPACE(520),
-  SCHEMA_FILE_NOT_EXISTS(521),
+  PB_TREE_FILE_NOT_EXISTS(521),
   OVERSIZE_RECORD(522),
-  SCHEMA_FILE_REDO_LOG_BROKEN(523),
+  PB_TREE_FILE_REDO_LOG_BROKEN(523),
   TEMPLATE_NOT_ACTIVATED(524),
   DATABASE_CONFIG_ERROR(525),
   SCHEMA_QUOTA_EXCEEDED(526),
diff --git a/node-commons/src/assembly/resources/conf/iotdb-common.properties 
b/node-commons/src/assembly/resources/conf/iotdb-common.properties
index e353c67faa9..54e6e49acbb 100644
--- a/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -248,7 +248,7 @@ cluster_name=defaultCluster
 ### Schema Engine Configuration
 ####################
 
-# The schema management mode of schema engine. Currently support Memory and 
Schema_File.
+# The schema management mode of schema engine. Currently support Memory and 
PB_Tree.
 # This config of all DataNodes in one cluster must keep same.
 # Datatype: string
 # schema_engine_mode=Memory
diff --git a/server/src/assembly/resources/tools/schema/print-schema-file.bat 
b/server/src/assembly/resources/tools/schema/print-pb-tree-file.bat
similarity index 97%
rename from server/src/assembly/resources/tools/schema/print-schema-file.bat
rename to server/src/assembly/resources/tools/schema/print-pb-tree-file.bat
index 18ca596c6eb..8a4bbf23e3b 100644
--- a/server/src/assembly/resources/tools/schema/print-schema-file.bat
+++ b/server/src/assembly/resources/tools/schema/print-pb-tree-file.bat
@@ -29,7 +29,7 @@ pushd %~dp0..\..
 if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD%
 popd
 
-if NOT DEFINED MAIN_CLASS set 
MAIN_CLASS=org.apache.iotdb.db.tools.schema.SchemaFileSketchTool
+if NOT DEFINED MAIN_CLASS set 
MAIN_CLASS=org.apache.iotdb.db.tools.schema.PBTreeFileSketchTool
 if NOT DEFINED JAVA_HOME goto :err
 
 @REM 
-----------------------------------------------------------------------------
diff --git a/server/src/assembly/resources/tools/schema/print-schema-file.sh 
b/server/src/assembly/resources/tools/schema/print-pb-tree-file.sh
similarity index 100%
rename from server/src/assembly/resources/tools/schema/print-schema-file.sh
rename to server/src/assembly/resources/tools/schema/print-pb-tree-file.sh
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java 
b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index b98bad05684..cb83f3c2523 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -861,16 +861,16 @@ public class IoTDBConfig {
   private String schemaEngineMode = "Memory";
 
   /** the memory used for metadata cache when using persistent schema */
-  private int cachedMNodeSizeInSchemaFileMode = -1;
+  private int cachedMNodeSizeInPBTreeMode = -1;
 
-  /** the minimum size (in bytes) of segment inside a schema file page */
-  private short minimumSegmentInSchemaFile = 0;
+  /** the minimum size (in bytes) of segment inside a pb-tree file page */
+  private short minimumSegmentInPBTree = 0;
 
-  /** cache size for pages in one schema file */
-  private int pageCacheSizeInSchemaFile = 1024;
+  /** cache size for pages in one pb-tree file */
+  private int pageCacheSizeInPBTree = 1024;
 
   /** maximum number of logged pages before log erased */
-  private int schemaFileLogSize = 16384;
+  private int pbTreeLogSize = 16384;
 
   /**
    * Maximum number of measurement in one create timeseries plan node. If the 
number of measurement
@@ -2956,37 +2956,37 @@ public class IoTDBConfig {
     this.schemaEngineMode = schemaEngineMode;
   }
 
-  public int getCachedMNodeSizeInSchemaFileMode() {
-    return cachedMNodeSizeInSchemaFileMode;
+  public int getCachedMNodeSizeInPBTreeMode() {
+    return cachedMNodeSizeInPBTreeMode;
   }
 
   @TestOnly
-  public void setCachedMNodeSizeInSchemaFileMode(int 
cachedMNodeSizeInSchemaFileMode) {
-    this.cachedMNodeSizeInSchemaFileMode = cachedMNodeSizeInSchemaFileMode;
+  public void setCachedMNodeSizeInPBTreeMode(int cachedMNodeSizeInPBTreeMode) {
+    this.cachedMNodeSizeInPBTreeMode = cachedMNodeSizeInPBTreeMode;
   }
 
-  public short getMinimumSegmentInSchemaFile() {
-    return minimumSegmentInSchemaFile;
+  public short getMinimumSegmentInPBTree() {
+    return minimumSegmentInPBTree;
   }
 
-  public void setMinimumSegmentInSchemaFile(short minimumSegmentInSchemaFile) {
-    this.minimumSegmentInSchemaFile = minimumSegmentInSchemaFile;
+  public void setMinimumSegmentInPBTree(short minimumSegmentInPBTree) {
+    this.minimumSegmentInPBTree = minimumSegmentInPBTree;
   }
 
-  public int getPageCacheSizeInSchemaFile() {
-    return pageCacheSizeInSchemaFile;
+  public int getPageCacheSizeInPBTree() {
+    return pageCacheSizeInPBTree;
   }
 
-  public void setPageCacheSizeInSchemaFile(int pageCacheSizeInSchemaFile) {
-    this.pageCacheSizeInSchemaFile = pageCacheSizeInSchemaFile;
+  public void setPageCacheSizeInPBTree(int pageCacheSizeInPBTree) {
+    this.pageCacheSizeInPBTree = pageCacheSizeInPBTree;
   }
 
-  public int getSchemaFileLogSize() {
-    return schemaFileLogSize;
+  public int getPBTreeLogSize() {
+    return pbTreeLogSize;
   }
 
-  public void setSchemaFileLogSize(int schemaFileLogSize) {
-    this.schemaFileLogSize = schemaFileLogSize;
+  public void setPBTreeLogSize(int pbTreeLogSize) {
+    this.pbTreeLogSize = pbTreeLogSize;
   }
 
   public int getMaxMeasurementNumOfInternalRequest() {
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java 
b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 256af02c3de..2e749e26056 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -891,27 +891,26 @@ public class IoTDBDescriptor {
       conf.setEnableLastCache(false);
     }
 
-    conf.setCachedMNodeSizeInSchemaFileMode(
+    conf.setCachedMNodeSizeInPBTreeMode(
         Integer.parseInt(
             properties.getProperty(
-                "cached_mnode_size_in_schema_file_mode",
-                String.valueOf(conf.getCachedMNodeSizeInSchemaFileMode()))));
+                "cached_mnode_size_in_pb_tree_mode",
+                String.valueOf(conf.getCachedMNodeSizeInPBTreeMode()))));
 
-    conf.setMinimumSegmentInSchemaFile(
+    conf.setMinimumSegmentInPBTree(
         Short.parseShort(
             properties.getProperty(
-                "minimum_schema_file_segment_in_bytes",
-                String.valueOf(conf.getMinimumSegmentInSchemaFile()))));
+                "minimum_pb_tree_segment_in_bytes",
+                String.valueOf(conf.getMinimumSegmentInPBTree()))));
 
-    conf.setPageCacheSizeInSchemaFile(
+    conf.setPageCacheSizeInPBTree(
         Integer.parseInt(
             properties.getProperty(
-                "page_cache_in_schema_file", 
String.valueOf(conf.getPageCacheSizeInSchemaFile()))));
+                "page_cache_in_pb_tree", 
String.valueOf(conf.getPageCacheSizeInPBTree()))));
 
-    conf.setSchemaFileLogSize(
+    conf.setPBTreeLogSize(
         Integer.parseInt(
-            properties.getProperty(
-                "schema_file_log_size", 
String.valueOf(conf.getSchemaFileLogSize()))));
+            properties.getProperty("pb_tree_log_size", 
String.valueOf(conf.getPBTreeLogSize()))));
 
     conf.setMaxMeasurementNumOfInternalRequest(
         Integer.parseInt(
diff --git 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java
 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java
index fab22003500..22deb0727cf 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileLogCorruptedException.java
@@ -27,8 +27,8 @@ public class SchemaFileLogCorruptedException extends 
MetadataException {
 
   public SchemaFileLogCorruptedException(String fileName, String reason) {
     super(
-        String.format("SchemaFileLog [%s] corrupted for [%s].", fileName, 
reason),
-        TSStatusCode.SCHEMA_FILE_REDO_LOG_BROKEN.getStatusCode(),
+        String.format("PBTreeFileLog [%s] corrupted for [%s].", fileName, 
reason),
+        TSStatusCode.PB_TREE_FILE_REDO_LOG_BROKEN.getStatusCode(),
         true);
   }
 }
diff --git 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileNotExists.java
 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileNotExists.java
index c7ffae2c3ff..04c38e68273 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileNotExists.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaFileNotExists.java
@@ -27,8 +27,8 @@ public class SchemaFileNotExists extends MetadataException {
 
   public SchemaFileNotExists(String fileName) {
     super(
-        String.format("Schema file [%s] not exists.", fileName),
-        TSStatusCode.SCHEMA_FILE_NOT_EXISTS.getStatusCode(),
+        String.format("PBTree file [%s] not exists.", fileName),
+        TSStatusCode.PB_TREE_FILE_NOT_EXISTS.getStatusCode(),
         true);
   }
 }
diff --git 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaPageOverflowException.java
 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaPageOverflowException.java
index 3903889d959..671868ca151 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaPageOverflowException.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/exception/metadata/schemafile/SchemaPageOverflowException.java
@@ -28,7 +28,8 @@ public class SchemaPageOverflowException extends 
MetadataException {
   public SchemaPageOverflowException(int pageIndex) {
     super(
         String.format(
-            "Page [%s] in schema file runs out of space or contains too many 
segments.", pageIndex),
+            "Page [%s] in pb-tree file runs out of space or contains too many 
segments.",
+            pageIndex),
         TSStatusCode.PAGE_OUT_OF_SPACE.getStatusCode(),
         true);
   }
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/MetadataConstant.java 
b/server/src/main/java/org/apache/iotdb/db/metadata/MetadataConstant.java
index f175271c401..36704a465a7 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/MetadataConstant.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/MetadataConstant.java
@@ -43,10 +43,10 @@ public class MetadataConstant {
   public static final String TAG_LOG = "tlog.txt";
   public static final String TEMPLATE_FILE = "template_log.bin";
   public static final String STORAGE_GROUP_LOG = "storage_group_log.bin";
-  public static final String SCHEMA_FILE_NAME = "schema_file.pst";
-  public static final String SCHEMA_LOG_FILE_NAME = "schema_file_log.bin";
+  public static final String PB_TREE_FILE_NAME = "pb_tree.pst";
+  public static final String PB_TREE_LOG_FILE_NAME = "pb_tree_log.bin";
 
-  public static final String SCHEMA_FILE_SNAPSHOT = "schema_file.pst.snapshot";
+  public static final String PB_TREE_SNAPSHOT = "pb_tree.pst.snapshot";
   public static final String TAG_LOG_SNAPSHOT = "tlog.txt.snapshot";
   public static final String TAG_LOG_SNAPSHOT_TMP = "tlog.txt.snapshot.tmp";
   public static final String MTREE_SNAPSHOT = "mtree.snapshot";
@@ -75,7 +75,7 @@ public class MetadataConstant {
   public static final String LOGICAL_VIEW_MNODE_TYPE_NAME = "LogicalViewMNode";
 
   public static final String SCHEMA_REGION_METRIC_NAME = "schema_region";
-  public static final String SCHEMA_ENGINE_METRIC_NAME = "schema_file";
+  public static final String SCHEMA_ENGINE_METRIC_NAME = "pb_tree";
 
   public static final String DEFAULT_SCHEMA_ENGINE_MODE = "Memory";
 
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/idtable/AppendOnlyDiskSchemaManager.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/idtable/AppendOnlyDiskSchemaManager.java
index a5110ec2e8b..d34bdb2dbfc 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/idtable/AppendOnlyDiskSchemaManager.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/idtable/AppendOnlyDiskSchemaManager.java
@@ -272,7 +272,7 @@ public class AppendOnlyDiskSchemaManager implements 
IDiskSchemaManager {
       outputStream.close();
       randomAccessFile.close();
     } catch (IOException e) {
-      logger.error("close schema file failed");
+      logger.error("close pb-tree file failed");
       throw e;
     }
   }
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/metric/SchemaEngineCachedMetric.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/metric/SchemaEngineCachedMetric.java
index 2be437e8bee..430d561bfac 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/metric/SchemaEngineCachedMetric.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/metric/SchemaEngineCachedMetric.java
@@ -33,6 +33,7 @@ import java.util.concurrent.TimeUnit;
 
 public class SchemaEngineCachedMetric implements ISchemaEngineMetric {
 
+  // TODO: rename schema_file to pb_tree
   private static final String RELEASE_THRESHOLD = 
"schema_file_release_threshold";
   private static final String FLUSH_THRESHOLD = "schema_file_flush_threshold";
   private static final String PINNED_NODE_NUM = "schema_file_pinned_num";
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/CachedMTreeStore.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/CachedMTreeStore.java
index 2270f7e6e4e..1eb95296bf6 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/CachedMTreeStore.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/CachedMTreeStore.java
@@ -136,8 +136,8 @@ public class CachedMTreeStore implements 
IMTreeStore<ICachedMNode> {
    * Get the target child node from parent. The parent must be pinned before 
invoking this method.
    * The method will try to get child node from cache. If there's no matched 
node in cache or the
    * node is not cached, which means it has been evicted, then this method 
will retrieve child node
-   * from schemaFile The returned child node will be pinned. If there's no 
matched child with the
-   * given name, this method will return null.
+   * from PBTree. The returned child node will be pinned. If there's no 
matched child with the given
+   * name, this method will return null.
    *
    * @param parent parent node
    * @param name the name or alias of the target child
@@ -275,8 +275,8 @@ public class CachedMTreeStore implements 
IMTreeStore<ICachedMNode> {
   /**
    * This method will delete a node from MTree, which means the corresponding 
subTree will be
    * deleted. Before deletion, the measurementMNode in this subtree should be 
collected for updating
-   * statistics in MManager. The deletion will delete subtree in schemaFile 
first and then delete
-   * the node from memory. The target node and its ancestors should be pinned 
before invoking this
+   * statistics in MManager. The deletion will delete subtree in PBTree first 
and then delete the
+   * node from memory. The target node and its ancestors should be pinned 
before invoking this
    * problem.
    *
    * @param parent the parent node of the target node
@@ -465,7 +465,7 @@ public class CachedMTreeStore implements 
IMTreeStore<ICachedMNode> {
           file.clear();
           file.close();
         } catch (MetadataException | IOException e) {
-          logger.error(String.format("Error occurred during SchemaFile clear, 
%s", e.getMessage()));
+          logger.error(String.format("Error occurred during PBTree clear, %s", 
e.getMessage()));
         }
       }
       file = null;
@@ -540,7 +540,7 @@ public class CachedMTreeStore implements 
IMTreeStore<ICachedMNode> {
     }
   }
 
-  /** Sync all volatile nodes to schemaFile and execute memory release after 
flush. */
+  /** Sync all volatile nodes to PBTree and execute memory release after 
flush. */
   public void flushVolatileNodes() {
     try {
       IDatabaseMNode<ICachedMNode> updatedStorageGroupMNode =
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/cache/CacheMemoryManager.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/cache/CacheMemoryManager.java
index ebd23fa302d..a9d2e94d473 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/cache/CacheMemoryManager.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/cache/CacheMemoryManager.java
@@ -89,7 +89,7 @@ public class CacheMemoryManager {
     flushSemaphore = new FiniteSemaphore(2, 0);
     releaseSemaphore = new FiniteSemaphore(2, 0);
     this.engineStatistics = 
engineStatistics.getAsCachedSchemaEngineStatistics();
-    if 
(IoTDBDescriptor.getInstance().getConfig().getCachedMNodeSizeInSchemaFileMode() 
>= 0) {
+    if 
(IoTDBDescriptor.getInstance().getConfig().getCachedMNodeSizeInPBTreeMode() >= 
0) {
       releaseFlushStrategy = new 
ReleaseFlushStrategyNumBasedImpl(this.engineStatistics);
     } else {
       releaseFlushStrategy = new 
ReleaseFlushStrategySizeBasedImpl(this.engineStatistics);
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/memcontrol/ReleaseFlushStrategyNumBasedImpl.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/memcontrol/ReleaseFlushStrategyNumBasedImpl.java
index d1e851cbd70..c6caaaee67d 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/memcontrol/ReleaseFlushStrategyNumBasedImpl.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/memcontrol/ReleaseFlushStrategyNumBasedImpl.java
@@ -30,7 +30,7 @@ public class ReleaseFlushStrategyNumBasedImpl implements 
IReleaseFlushStrategy {
 
   public ReleaseFlushStrategyNumBasedImpl(CachedSchemaEngineStatistics 
engineStatistics) {
     this.engineStatistics = engineStatistics;
-    this.capacity = 
IoTDBDescriptor.getInstance().getConfig().getCachedMNodeSizeInSchemaFileMode();
+    this.capacity = 
IoTDBDescriptor.getInstance().getConfig().getCachedMNodeSizeInPBTreeMode();
   }
 
   @Override
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/ISchemaFile.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/ISchemaFile.java
index 4ba759f4f67..ce33d62801f 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/ISchemaFile.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/ISchemaFile.java
@@ -36,7 +36,7 @@ public interface ISchemaFile {
   ICachedMNode init() throws MetadataException;
 
   /**
-   * Modify header of schema file corresponding to the database node 
synchronously
+   * Modify header of pb-tree file corresponding to the database node 
synchronously
    *
    * @param sgNode node to be updated
    * @return true if success
@@ -44,7 +44,7 @@ public interface ISchemaFile {
   boolean updateDatabaseNode(IDatabaseMNode<ICachedMNode> sgNode) throws 
IOException;
 
   /**
-   * Only database node along with its descendents could be flushed into 
schema file.
+   * Only database node along with its descendants could be flushed into 
pb-tree file.
    *
    * @param node
    */
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFile.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFile.java
index 4cee20b46dc..1763bc925f9 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFile.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFile.java
@@ -59,7 +59,7 @@ public class SchemaFile implements ISchemaFile {
 
   private static final Logger logger = 
LoggerFactory.getLogger(SchemaFile.class);
 
-  // attributes for this schema file
+  // attributes for this pb-tree file
   private final String filePath;
   private final String logPath;
   private String storageGroupName;
@@ -85,8 +85,8 @@ public class SchemaFile implements ISchemaFile {
       throws IOException, MetadataException {
     String dirPath = getDirPath(sgName, schemaRegionId);
     this.storageGroupName = sgName;
-    this.filePath = dirPath + File.separator + 
MetadataConstant.SCHEMA_FILE_NAME;
-    this.logPath = dirPath + File.separator + 
MetadataConstant.SCHEMA_LOG_FILE_NAME;
+    this.filePath = dirPath + File.separator + 
MetadataConstant.PB_TREE_FILE_NAME;
+    this.logPath = dirPath + File.separator + 
MetadataConstant.PB_TREE_LOG_FILE_NAME;
 
     pmtFile = SystemFileFactory.INSTANCE.getFile(filePath);
     if (!pmtFile.exists() && !override) {
@@ -94,7 +94,7 @@ public class SchemaFile implements ISchemaFile {
     }
 
     if (pmtFile.exists() && override) {
-      logger.warn("Schema File [{}] will be overwritten since already 
exists.", filePath);
+      logger.warn("PBTree File [{}] will be overwritten since already 
exists.", filePath);
       Files.delete(Paths.get(pmtFile.toURI()));
       pmtFile.createNewFile();
     }
@@ -115,11 +115,11 @@ public class SchemaFile implements ISchemaFile {
   }
 
   private SchemaFile(File file) throws IOException, MetadataException {
-    // only used to sketch a schema file so a file object is necessary while
+    // only used to sketch a pb-tree file so a file object is necessary while
     //  components of log manipulations are not.
     pmtFile = file;
     filePath = pmtFile.getPath();
-    logPath = file.getParent() + File.separator + 
MetadataConstant.SCHEMA_LOG_FILE_NAME;
+    logPath = file.getParent() + File.separator + 
MetadataConstant.PB_TREE_LOG_FILE_NAME;
     channel = new RandomAccessFile(file, "rw").getChannel();
     headerContent = ByteBuffer.allocate(SchemaFileConfig.FILE_HEADER_SIZE);
 
@@ -138,7 +138,7 @@ public class SchemaFile implements ISchemaFile {
         SystemFileFactory.INSTANCE.getFile(
             getDirPath(sgName, schemaRegionId)
                 + File.separator
-                + MetadataConstant.SCHEMA_FILE_NAME);
+                + MetadataConstant.PB_TREE_FILE_NAME);
     return new SchemaFile(
         sgName,
         schemaRegionId,
@@ -153,7 +153,7 @@ public class SchemaFile implements ISchemaFile {
   }
 
   public static ISchemaFile loadSchemaFile(File file) throws IOException, 
MetadataException {
-    // only be called to sketch a Schema File
+    // only be called to sketch a PBTree File
     return new SchemaFile(file);
   }
 
@@ -249,7 +249,7 @@ public class SchemaFile implements ISchemaFile {
       throws MetadataException, IOException {
     if (parent.isMeasurement() || getNodeAddress(parent) < 0) {
       throw new MetadataException(
-          String.format("Node [%s] has no child in schema file.", 
parent.getFullPath()));
+          String.format("Node [%s] has no child in pb-tree file.", 
parent.getFullPath()));
     }
 
     return pageManager.getChildren(parent);
@@ -294,7 +294,7 @@ public class SchemaFile implements ISchemaFile {
     String header =
         String.format(
             "=============================\n"
-                + "== Schema File Sketch Tool ==\n"
+                + "== PBTree File Sketch Tool ==\n"
                 + "=============================\n"
                 + "== Notice: \n"
                 + "==  Internal/Entity presents as (name, is_aligned, 
child_segment_address)\n"
@@ -329,7 +329,7 @@ public class SchemaFile implements ISchemaFile {
    *         <li>b. 1 bool (1 byte): isEntityStorageGroup {@link #isEntity}
    *         <li>c. 1 int (4 bytes): hash code of template name {@link 
#sgNodeTemplateIdWithState}
    *         <li>d. 1 long (8 bytes): last segment address of database {@link 
#lastSGAddr}
-   *         <li>e. 1 int (4 bytes): version of schema file {@linkplain
+   *         <li>e. 1 int (4 bytes): version of pb-tree file {@linkplain
    *             SchemaFileConfig#SCHEMA_FILE_VERSION}
    *       </ul>
    * </ul>
@@ -338,7 +338,7 @@ public class SchemaFile implements ISchemaFile {
    */
   private void initFileHeader() throws IOException, MetadataException {
     if (channel.size() == 0) {
-      // new schema file
+      // new pb-tree file
       lastPageIndex = 0;
       ReadWriteIOUtils.write(lastPageIndex, headerContent);
       ReadWriteIOUtils.write(dataTTL, headerContent);
@@ -444,12 +444,12 @@ public class SchemaFile implements ISchemaFile {
   @Override
   public boolean createSnapshot(File snapshotDir) {
     File schemaFileSnapshot =
-        SystemFileFactory.INSTANCE.getFile(snapshotDir, 
MetadataConstant.SCHEMA_FILE_SNAPSHOT);
+        SystemFileFactory.INSTANCE.getFile(snapshotDir, 
MetadataConstant.PB_TREE_SNAPSHOT);
     try {
       sync();
       if (schemaFileSnapshot.exists() && !schemaFileSnapshot.delete()) {
         logger.error(
-            "Failed to delete old snapshot {} while creating schema file 
snapshot.",
+            "Failed to delete old snapshot {} while creating pb-tree file 
snapshot.",
             schemaFileSnapshot.getName());
         return false;
       }
@@ -465,16 +465,16 @@ public class SchemaFile implements ISchemaFile {
   public static ISchemaFile loadSnapshot(File snapshotDir, String sgName, int 
schemaRegionId)
       throws IOException, MetadataException {
     File snapshot =
-        SystemFileFactory.INSTANCE.getFile(snapshotDir, 
MetadataConstant.SCHEMA_FILE_SNAPSHOT);
+        SystemFileFactory.INSTANCE.getFile(snapshotDir, 
MetadataConstant.PB_TREE_SNAPSHOT);
     if (!snapshot.exists()) {
       throw new SchemaFileNotExists(snapshot.getPath());
     }
     File schemaFile =
         SystemFileFactory.INSTANCE.getFile(
-            getDirPath(sgName, schemaRegionId), 
MetadataConstant.SCHEMA_FILE_NAME);
+            getDirPath(sgName, schemaRegionId), 
MetadataConstant.PB_TREE_FILE_NAME);
     File schemaLogFile =
         SystemFileFactory.INSTANCE.getFile(
-            getDirPath(sgName, schemaRegionId), 
MetadataConstant.SCHEMA_LOG_FILE_NAME);
+            getDirPath(sgName, schemaRegionId), 
MetadataConstant.PB_TREE_LOG_FILE_NAME);
     Files.deleteIfExists(schemaFile.toPath());
     Files.deleteIfExists(schemaLogFile.toPath());
     Files.copy(snapshot.toPath(), schemaFile.toPath());
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFileConfig.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFileConfig.java
index 90004b1b366..74c49f6b61f 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFileConfig.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/SchemaFileConfig.java
@@ -25,7 +25,7 @@ public class SchemaFileConfig {
 
   // region SchemaFile Configuration
 
-  // current version of schema file
+  // current version of pb-tree file
   public static final int SCHEMA_FILE_VERSION = 1;
 
   // folder to store .pst files
@@ -34,15 +34,13 @@ public class SchemaFileConfig {
   public static int FILE_HEADER_SIZE = 256; // size of file header in bytes
 
   public static final int PAGE_CACHE_SIZE =
-      IoTDBDescriptor.getInstance()
-          .getConfig()
-          .getPageCacheSizeInSchemaFile(); // size of page cache
+      IoTDBDescriptor.getInstance().getConfig().getPageCacheSizeInPBTree(); // 
size of page cache
 
   // size of page within one redo log, restricting log around 1GB
   public static final int SCHEMA_FILE_LOG_SIZE =
-      IoTDBDescriptor.getInstance().getConfig().getSchemaFileLogSize();
+      IoTDBDescriptor.getInstance().getConfig().getPBTreeLogSize();
 
-  // marks to note the state of schema file log
+  // marks to note the state of pb-tree file log
   public static final byte SF_PREPARE_MARK = (byte) 0xfe;
   public static final byte SF_COMMIT_MARK = (byte) 0xff;
 
@@ -73,9 +71,9 @@ public class SchemaFileConfig {
       2; // length of short, which is the type of segment offset and index
   public static final short SEG_MAX_SIZ = (short) (PAGE_LENGTH - 
PAGE_HEADER_SIZE - SEG_OFF_DIG);
   public static final short SEG_MIN_SIZ =
-      
IoTDBDescriptor.getInstance().getConfig().getMinimumSegmentInSchemaFile() > 
SEG_MAX_SIZ
+      IoTDBDescriptor.getInstance().getConfig().getMinimumSegmentInPBTree() > 
SEG_MAX_SIZ
           ? SEG_MAX_SIZ
-          : 
IoTDBDescriptor.getInstance().getConfig().getMinimumSegmentInSchemaFile();
+          : 
IoTDBDescriptor.getInstance().getConfig().getMinimumSegmentInPBTree();
 
   public static final int SEG_INDEX_DIGIT = 16; // for type short in bits
   public static final long SEG_INDEX_MASK = 0xffffL; // help to translate 
address
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/BTreePageManager.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/BTreePageManager.java
index 6861f0678a6..985a8a6d4d8 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/BTreePageManager.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/BTreePageManager.java
@@ -356,7 +356,7 @@ public class BTreePageManager extends PageManager {
     if (getNodeAddress(parent) < 0) {
       throw new MetadataException(
           String.format(
-              "Node [%s] has no valid segment address in schema file.", 
parent.getFullPath()));
+              "Node [%s] has no valid segment address in pb-tree file.", 
parent.getFullPath()));
     }
 
     long actualSegAddr = getTargetSegmentAddress(getNodeAddress(parent), 
childName);
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/PageManager.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/PageManager.java
index ecb1dca755f..8d6369d6ebb 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/PageManager.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/mtree/store/disk/schemafile/pagemgr/PageManager.java
@@ -267,7 +267,7 @@ public abstract class PageManager implements IPageManager {
       if (curPage.getAsSegmentedPage().read(getSegIndex(actualAddress), 
entry.getKey()) == null) {
         throw new MetadataException(
             String.format(
-                "Node[%s] has no child[%s] in schema file.", node.getName(), 
entry.getKey()));
+                "Node[%s] has no child[%s] in pb-tree file.", node.getName(), 
entry.getKey()));
       }
 
       // prepare alias comparison
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaEngineStatistics.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaEngineStatistics.java
index b253dd0efdd..c2e8a4c8561 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaEngineStatistics.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaEngineStatistics.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.metadata.rescon;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * This class is used to record global statistics for SchemaEngine in 
Schema_File mode, which is a
+ * This class is used to record global statistics for SchemaEngine in PB_Tree 
mode, which is a
  * superset of the statistics in Memory mode
  */
 public class CachedSchemaEngineStatistics extends MemSchemaEngineStatistics {
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaRegionStatistics.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaRegionStatistics.java
index 7633766fc73..2e9b1d22d52 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaRegionStatistics.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/CachedSchemaRegionStatistics.java
@@ -23,7 +23,7 @@ import 
org.apache.iotdb.db.metadata.mtree.store.disk.cache.ICacheManager;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * This class is used to record statistics within a SchemaRegion in 
Schema_File mode, which is a
+ * This class is used to record statistics within a SchemaRegion in PB_Tree 
mode, which is a
  * superset of the statistics in Memory mode
  */
 public class CachedSchemaRegionStatistics extends MemSchemaRegionStatistics {
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/SchemaResourceManager.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/SchemaResourceManager.java
index c4da4cc51e1..457b5a894f0 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/rescon/SchemaResourceManager.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/rescon/SchemaResourceManager.java
@@ -31,7 +31,7 @@ public class SchemaResourceManager {
     if (IoTDBDescriptor.getInstance()
         .getConfig()
         .getSchemaEngineMode()
-        .equals(SchemaEngineMode.Schema_File.toString())) {
+        .equals(SchemaEngineMode.PB_Tree.toString())) {
       initSchemaFileModeResource(engineStatistics);
     }
   }
@@ -40,7 +40,7 @@ public class SchemaResourceManager {
     if (IoTDBDescriptor.getInstance()
         .getConfig()
         .getSchemaEngineMode()
-        .equals(SchemaEngineMode.Schema_File.toString())) {
+        .equals(SchemaEngineMode.PB_Tree.toString())) {
       clearSchemaFileModeResource();
     }
   }
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngine.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngine.java
index e8f3fdfbb21..eef8b139424 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngine.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngine.java
@@ -209,7 +209,7 @@ public class SchemaEngine {
   public void clear() {
     schemaRegionLoader.clear();
 
-    // clearSchemaResource will shut down release and flush task in 
Schema_File mode, which must be
+    // clearSchemaResource will shut down release and flush task in PB_Tree 
mode, which must be
     // down before clear schema region
     SchemaResourceManager.clearSchemaResource();
     if (timedForceMLogThread != null) {
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngineMode.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngineMode.java
index 09132d287da..fb25af53ccc 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngineMode.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaEngineMode.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.metadata.schemaregion;
 
 public enum SchemaEngineMode {
   Memory(0),
-  Schema_File(1),
+  PB_Tree(1),
   Rocksdb_based(2);
 
   private final int code;
diff --git 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionSchemaFileImpl.java
 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionPBTreeImpl.java
similarity index 97%
rename from 
server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionSchemaFileImpl.java
rename to 
server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionPBTreeImpl.java
index 95ec2f4e10c..493637ae44e 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionSchemaFileImpl.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/metadata/schemaregion/SchemaRegionPBTreeImpl.java
@@ -129,10 +129,10 @@ import static 
org.apache.iotdb.tsfile.common.constant.TsFileConstant.PATH_SEPARA
  * </ol>
  */
 @SuppressWarnings("java:S1135") // ignore todos
-@SchemaRegion(mode = "Schema_File")
-public class SchemaRegionSchemaFileImpl implements ISchemaRegion {
+@SchemaRegion(mode = "PB_Tree")
+public class SchemaRegionPBTreeImpl implements ISchemaRegion {
 
-  private static final Logger logger = 
LoggerFactory.getLogger(SchemaRegionSchemaFileImpl.class);
+  private static final Logger logger = 
LoggerFactory.getLogger(SchemaRegionPBTreeImpl.class);
 
   protected static IoTDBConfig config = 
IoTDBDescriptor.getInstance().getConfig();
 
@@ -159,8 +159,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
   private TagManager tagManager;
 
   // region Interfaces and Implementation of initialization、snapshot、recover 
and clear
-  public SchemaRegionSchemaFileImpl(ISchemaRegionParams schemaRegionParams)
-      throws MetadataException {
+  public SchemaRegionPBTreeImpl(ISchemaRegionParams schemaRegionParams) throws 
MetadataException {
 
     storageGroupFullPath = schemaRegionParams.getDatabase().getFullPath();
     this.schemaRegionId = schemaRegionParams.getSchemaRegionId();
@@ -568,8 +567,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
         createTimeseries(plan, offset);
         done = true;
       } catch (SeriesOverflowException e) {
-        logger.warn(
-            "Too many timeseries during recovery from MLog, waiting for 
SchemaFile swapping.");
+        logger.warn("Too many timeseries during recovery from MLog, waiting 
for PBTree swapping.");
         try {
           Thread.sleep(3000L);
         } catch (InterruptedException e2) {
@@ -672,8 +670,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
         createAlignedTimeSeries(plan);
         done = true;
       } catch (SeriesOverflowException e) {
-        logger.warn(
-            "Too many timeseries during recovery from MLog, waiting for 
SchemaFile swapping.");
+        logger.warn("Too many timeseries during recovery from MLog, waiting 
for PBTree swapping.");
         try {
           Thread.sleep(3000L);
         } catch (InterruptedException e2) {
@@ -1343,20 +1340,20 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
   }
 
   private class RecoverPlanOperator
-      extends SchemaRegionPlanVisitor<RecoverOperationResult, 
SchemaRegionSchemaFileImpl> {
+      extends SchemaRegionPlanVisitor<RecoverOperationResult, 
SchemaRegionPBTreeImpl> {
 
     @Override
     public RecoverOperationResult visitSchemaRegionPlan(
-        ISchemaRegionPlan plan, SchemaRegionSchemaFileImpl context) {
+        ISchemaRegionPlan plan, SchemaRegionPBTreeImpl context) {
       throw new UnsupportedOperationException(
           String.format(
-              "SchemaRegionPlan of type %s doesn't support recover operation 
in SchemaRegionSchemaFileImpl.",
+              "SchemaRegionPlan of type %s doesn't support recover operation 
in SchemaRegionPBTreeImpl.",
               plan.getPlanType().name()));
     }
 
     @Override
     public RecoverOperationResult visitCreateTimeSeries(
-        ICreateTimeSeriesPlan createTimeSeriesPlan, SchemaRegionSchemaFileImpl 
context) {
+        ICreateTimeSeriesPlan createTimeSeriesPlan, SchemaRegionPBTreeImpl 
context) {
       try {
         recoverTimeseries(createTimeSeriesPlan, 
createTimeSeriesPlan.getTagOffset());
         return RecoverOperationResult.SUCCESS;
@@ -1367,8 +1364,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitCreateAlignedTimeSeries(
-        ICreateAlignedTimeSeriesPlan createAlignedTimeSeriesPlan,
-        SchemaRegionSchemaFileImpl context) {
+        ICreateAlignedTimeSeriesPlan createAlignedTimeSeriesPlan, 
SchemaRegionPBTreeImpl context) {
       try {
         recoverAlignedTimeSeries(createAlignedTimeSeriesPlan);
         return RecoverOperationResult.SUCCESS;
@@ -1379,7 +1375,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitDeleteTimeSeries(
-        IDeleteTimeSeriesPlan deleteTimeSeriesPlan, SchemaRegionSchemaFileImpl 
context) {
+        IDeleteTimeSeriesPlan deleteTimeSeriesPlan, SchemaRegionPBTreeImpl 
context) {
       try {
         // since we only has one path for one DeleteTimeSeriesPlan
         
deleteOneTimeseriesUpdateStatistics(deleteTimeSeriesPlan.getDeletePathList().get(0));
@@ -1391,7 +1387,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitChangeAlias(
-        IChangeAliasPlan changeAliasPlan, SchemaRegionSchemaFileImpl context) {
+        IChangeAliasPlan changeAliasPlan, SchemaRegionPBTreeImpl context) {
       try {
         changeAlias(changeAliasPlan.getPath(), changeAliasPlan.getAlias());
         return RecoverOperationResult.SUCCESS;
@@ -1402,7 +1398,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitChangeTagOffset(
-        IChangeTagOffsetPlan changeTagOffsetPlan, SchemaRegionSchemaFileImpl 
context) {
+        IChangeTagOffsetPlan changeTagOffsetPlan, SchemaRegionPBTreeImpl 
context) {
       try {
         changeOffset(changeTagOffsetPlan.getPath(), 
changeTagOffsetPlan.getOffset());
         return RecoverOperationResult.SUCCESS;
@@ -1413,7 +1409,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitAutoCreateDeviceMNode(
-        IAutoCreateDeviceMNodePlan autoCreateDeviceMNodePlan, 
SchemaRegionSchemaFileImpl context) {
+        IAutoCreateDeviceMNodePlan autoCreateDeviceMNodePlan, 
SchemaRegionPBTreeImpl context) {
       try {
         autoCreateDeviceMNode(autoCreateDeviceMNodePlan);
         return RecoverOperationResult.SUCCESS;
@@ -1425,7 +1421,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
     @Override
     public RecoverOperationResult visitActivateTemplateInCluster(
         IActivateTemplateInClusterPlan activateTemplateInClusterPlan,
-        SchemaRegionSchemaFileImpl context) {
+        SchemaRegionPBTreeImpl context) {
       try {
         recoverActivatingSchemaTemplate(activateTemplateInClusterPlan);
         return RecoverOperationResult.SUCCESS;
@@ -1436,7 +1432,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitPreDeleteTimeSeries(
-        IPreDeleteTimeSeriesPlan preDeleteTimeSeriesPlan, 
SchemaRegionSchemaFileImpl context) {
+        IPreDeleteTimeSeriesPlan preDeleteTimeSeriesPlan, 
SchemaRegionPBTreeImpl context) {
       try {
         recoverPreDeleteTimeseries(preDeleteTimeSeriesPlan.getPath());
         return RecoverOperationResult.SUCCESS;
@@ -1448,7 +1444,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
     @Override
     public RecoverOperationResult visitRollbackPreDeleteTimeSeries(
         IRollbackPreDeleteTimeSeriesPlan rollbackPreDeleteTimeSeriesPlan,
-        SchemaRegionSchemaFileImpl context) {
+        SchemaRegionPBTreeImpl context) {
       try {
         
recoverRollbackPreDeleteTimeseries(rollbackPreDeleteTimeSeriesPlan.getPath());
         return RecoverOperationResult.SUCCESS;
@@ -1459,7 +1455,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitPreDeactivateTemplate(
-        IPreDeactivateTemplatePlan preDeactivateTemplatePlan, 
SchemaRegionSchemaFileImpl context) {
+        IPreDeactivateTemplatePlan preDeactivateTemplatePlan, 
SchemaRegionPBTreeImpl context) {
       try {
         constructSchemaBlackListWithTemplate(preDeactivateTemplatePlan);
         return RecoverOperationResult.SUCCESS;
@@ -1471,7 +1467,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
     @Override
     public RecoverOperationResult visitRollbackPreDeactivateTemplate(
         IRollbackPreDeactivateTemplatePlan rollbackPreDeactivateTemplatePlan,
-        SchemaRegionSchemaFileImpl context) {
+        SchemaRegionPBTreeImpl context) {
       try {
         rollbackSchemaBlackListWithTemplate(rollbackPreDeactivateTemplatePlan);
         return RecoverOperationResult.SUCCESS;
@@ -1482,7 +1478,7 @@ public class SchemaRegionSchemaFileImpl implements 
ISchemaRegion {
 
     @Override
     public RecoverOperationResult visitDeactivateTemplate(
-        IDeactivateTemplatePlan deactivateTemplatePlan, 
SchemaRegionSchemaFileImpl context) {
+        IDeactivateTemplatePlan deactivateTemplatePlan, SchemaRegionPBTreeImpl 
context) {
       try {
         deactivateTemplateInBlackList(deactivateTemplatePlan);
         return RecoverOperationResult.SUCCESS;
diff --git 
a/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/util/QueryResultUtils.java
 
b/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/util/QueryResultUtils.java
index c79672169d9..8ab471370fc 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/util/QueryResultUtils.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/protocol/influxdb/util/QueryResultUtils.java
@@ -229,7 +229,7 @@ public class QueryResultUtils {
 
   /**
    * Convert align by device query result of NewIoTDB to the query result of 
influxdb,used for
-   * Memory and schema_file schema region
+   * Memory and PB_Tree schema region
    *
    * @param tsExecuteStatementResp NewIoTDB execute statement resp to be 
converted
    * @return query results in influxdb format
diff --git 
a/server/src/main/java/org/apache/iotdb/db/tools/schema/SchemaFileSketchTool.java
 
b/server/src/main/java/org/apache/iotdb/db/tools/schema/PBTreeFileSketchTool.java
similarity index 93%
rename from 
server/src/main/java/org/apache/iotdb/db/tools/schema/SchemaFileSketchTool.java
rename to 
server/src/main/java/org/apache/iotdb/db/tools/schema/PBTreeFileSketchTool.java
index f97fb854217..a5fe4e50992 100644
--- 
a/server/src/main/java/org/apache/iotdb/db/tools/schema/SchemaFileSketchTool.java
+++ 
b/server/src/main/java/org/apache/iotdb/db/tools/schema/PBTreeFileSketchTool.java
@@ -41,13 +41,13 @@ import java.io.PrintWriter;
  * parse the {@linkplain 
org.apache.iotdb.db.metadata.mtree.store.disk.schemafile.SchemaFile} to
  * text
  */
-public class SchemaFileSketchTool {
+public class PBTreeFileSketchTool {
 
-  private static final Logger logger = 
LoggerFactory.getLogger(SchemaFileSketchTool.class);
-  private static final String SFST_CLI_PREFIX = "print-schema-file";
+  private static final Logger logger = 
LoggerFactory.getLogger(PBTreeFileSketchTool.class);
+  private static final String SFST_CLI_PREFIX = "print-pb-tree-file";
 
   private static final String FILE_ARGS = "f";
-  private static final String FILE_NAME = "schema file";
+  private static final String FILE_NAME = "pb-tree file";
 
   private static final String OUT_ARGS = "o";
   private static final String OUT_NAME = "output txt file";
@@ -72,7 +72,7 @@ public class SchemaFileSketchTool {
             .required()
             .argName(FILE_NAME)
             .hasArg()
-            .desc("Need to specify a schema file to sketch (required)")
+            .desc("Need to specify a pb-tree file to sketch (required)")
             .build();
     options.addOption(opFile);
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/AliasIndexPageTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/AliasIndexPageTest.java
index 153b66a6ca6..32aba0b4c29 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/AliasIndexPageTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/AliasIndexPageTest.java
@@ -38,7 +38,7 @@ public class AliasIndexPageTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/InternalPageTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/InternalPageTest.java
index fbf8db65003..6a6b0a1449d 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/InternalPageTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/InternalPageTest.java
@@ -38,7 +38,7 @@ public class InternalPageTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileLogTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileLogTest.java
index 777fdc9410b..fbbd7ab3f89 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileLogTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileLogTest.java
@@ -22,6 +22,7 @@ import org.apache.iotdb.commons.exception.MetadataException;
 import org.apache.iotdb.commons.schema.node.role.IDatabaseMNode;
 import org.apache.iotdb.commons.schema.node.utils.IMNodeFactory;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.metadata.MetadataConstant;
 import org.apache.iotdb.db.metadata.mnode.schemafile.ICachedMNode;
 import org.apache.iotdb.db.metadata.mnode.schemafile.factory.CacheMNodeFactory;
 import org.apache.iotdb.db.metadata.mtree.store.disk.schemafile.ISchemaPage;
@@ -57,7 +58,7 @@ public class SchemaFileLogTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
@@ -129,13 +130,19 @@ public class SchemaFileLogTest {
       sf.close();
     }
 
-    // modify log file to restore schema file
+    // modify log file to restore pb-tree file
     FileOutputStream outputStream = null;
     FileChannel channel;
     try {
       String[] logFilePath =
           new String[] {
-            "target", "tmp", "system", "schema", "root.test.vRoot1", "0", 
"schema_file_log.bin"
+            "target",
+            "tmp",
+            "system",
+            "schema",
+            "root.test.vRoot1",
+            "0",
+            MetadataConstant.PB_TREE_LOG_FILE_NAME
           };
       File logFile = new File(String.join(File.separator, logFilePath));
       outputStream = new FileOutputStream(logFile, true);
@@ -145,7 +152,7 @@ public class SchemaFileLogTest {
       outputStream.close();
     }
 
-    // verify that schema file has been repaired
+    // verify that pb-tree file has been repaired
     sf = (SchemaFile) SchemaFile.loadSchemaFile("root.test.vRoot1", 
TEST_SCHEMA_REGION_ID);
     res = sf.getChildren(lastNode);
     int cnt2 = 0;
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileTest.java
index 86cb807a8e1..d9ac2c84628 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/SchemaFileTest.java
@@ -68,7 +68,7 @@ public class SchemaFileTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/WrappedSegmentTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/WrappedSegmentTest.java
index 3bfc57fef6b..daf2ff3ad35 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/WrappedSegmentTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/mtree/schemafile/WrappedSegmentTest.java
@@ -48,7 +48,7 @@ public class WrappedSegmentTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/AbstractSchemaRegionTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/AbstractSchemaRegionTest.java
index 49b4b5956c3..11b61d494ae 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/AbstractSchemaRegionTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/AbstractSchemaRegionTest.java
@@ -50,9 +50,9 @@ public abstract class AbstractSchemaRegionTest {
   public static List<SchemaRegionTestParams> getTestModes() {
     return Arrays.asList(
         new SchemaRegionTestParams("MemoryMode", "Memory", -1, true),
-        new SchemaRegionTestParams("SchemaFile-FullMemory", "Schema_File", 
10000, true),
-        new SchemaRegionTestParams("SchemaFile-PartialMemory", "Schema_File", 
3, true),
-        new SchemaRegionTestParams("SchemaFile-NonMemory", "Schema_File", 0, 
true));
+        new SchemaRegionTestParams("PBTree-FullMemory", "PB_Tree", 10000, 
true),
+        new SchemaRegionTestParams("PBTree-PartialMemory", "PB_Tree", 3, true),
+        new SchemaRegionTestParams("PBTree-NonMemory", "PB_Tree", 0, true));
   }
 
   public AbstractSchemaRegionTest(SchemaRegionTestParams testParams) {
@@ -65,10 +65,10 @@ public abstract class AbstractSchemaRegionTest {
         new SchemaRegionTestParams(
             "Raw-Config",
             config.getSchemaEngineMode(),
-            config.getCachedMNodeSizeInSchemaFileMode(),
+            config.getCachedMNodeSizeInPBTreeMode(),
             config.isClusterMode());
     config.setSchemaEngineMode(testParams.schemaEngineMode);
-    config.setCachedMNodeSizeInSchemaFileMode(testParams.cachedMNodeSize);
+    config.setCachedMNodeSizeInPBTreeMode(testParams.cachedMNodeSize);
     config.setClusterMode(testParams.isClusterMode);
     SchemaEngine.getInstance().init();
   }
@@ -78,7 +78,7 @@ public abstract class AbstractSchemaRegionTest {
     SchemaEngine.getInstance().clear();
     cleanEnv();
     config.setSchemaEngineMode(rawConfig.schemaEngineMode);
-    config.setCachedMNodeSizeInSchemaFileMode(rawConfig.cachedMNodeSize);
+    config.setCachedMNodeSizeInPBTreeMode(rawConfig.cachedMNodeSize);
     config.setClusterMode(rawConfig.isClusterMode);
   }
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionBasicTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionBasicTest.java
index 2cf0efb4fee..fe5025f59f2 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionBasicTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionBasicTest.java
@@ -62,8 +62,8 @@ import static 
org.apache.iotdb.db.metadata.schemaRegion.SchemaRegionTestUtil.get
 
 /**
  * This class define test cases for {@link ISchemaRegion}. All test cases will 
be run in both Memory
- * and Schema_File modes. In Schema_File mode, there are three kinds of test 
environment: full
- * memory, partial memory and non memory.
+ * and PB_Tree modes. In PB_Tree mode, there are three kinds of test 
environment: full memory,
+ * partial memory and non memory.
  */
 public class SchemaRegionBasicTest extends AbstractSchemaRegionTest {
 
diff --git 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java
 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java
index e1f70963ffb..83b533d101b 100644
--- 
a/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java
+++ 
b/server/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java
@@ -232,7 +232,7 @@ public class SchemaStatisticsTest extends 
AbstractSchemaRegionTest {
 
   @Test
   public void testSchemaFileNodeStatistics() throws Exception {
-    if (testParams.getSchemaEngineMode().equals("Schema_File")) {
+    if (testParams.getSchemaEngineMode().equals("PB_Tree")) {
       ISchemaRegion schemaRegion1 = getSchemaRegion("root.sg1", 0);
       ISchemaRegion schemaRegion2 = getSchemaRegion("root.sg2", 1);
       CachedSchemaEngineStatistics engineStatistics =
diff --git 
a/server/src/test/java/org/apache/iotdb/db/tools/SchemaFileSketchTest.java 
b/server/src/test/java/org/apache/iotdb/db/tools/PBTreeFileSketchTest.java
similarity index 94%
rename from 
server/src/test/java/org/apache/iotdb/db/tools/SchemaFileSketchTest.java
rename to 
server/src/test/java/org/apache/iotdb/db/tools/PBTreeFileSketchTest.java
index 63a52e8e5a4..1a6140208bf 100644
--- a/server/src/test/java/org/apache/iotdb/db/tools/SchemaFileSketchTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/tools/PBTreeFileSketchTest.java
@@ -28,7 +28,7 @@ import 
org.apache.iotdb.db.metadata.mnode.schemafile.factory.CacheMNodeFactory;
 import org.apache.iotdb.db.metadata.mtree.store.disk.schemafile.ISchemaFile;
 import org.apache.iotdb.db.metadata.mtree.store.disk.schemafile.SchemaFile;
 import org.apache.iotdb.db.metadata.schemaregion.SchemaEngineMode;
-import org.apache.iotdb.db.tools.schema.SchemaFileSketchTool;
+import org.apache.iotdb.db.tools.schema.PBTreeFileSketchTool;
 import org.apache.iotdb.db.utils.EnvironmentUtils;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
@@ -49,7 +49,7 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Queue;
 
-public class SchemaFileSketchTest {
+public class PBTreeFileSketchTest {
 
   private final IMNodeFactory<ICachedMNode> nodeFactory = 
CacheMNodeFactory.getInstance();
 
@@ -57,7 +57,7 @@ public class SchemaFileSketchTest {
   public void setUp() {
     IoTDBDescriptor.getInstance()
         .getConfig()
-        .setSchemaEngineMode(SchemaEngineMode.Schema_File.toString());
+        .setSchemaEngineMode(SchemaEngineMode.PB_Tree.toString());
     EnvironmentUtils.envSetUp();
   }
 
@@ -103,10 +103,10 @@ public class SchemaFileSketchTest {
                 + File.separator
                 + "0"
                 + File.separator
-                + MetadataConstant.SCHEMA_FILE_NAME);
+                + MetadataConstant.PB_TREE_FILE_NAME);
     File sketchFile = new File("sketch_schemafile.txt");
 
-    SchemaFileSketchTool.sketchFile(file.getAbsolutePath(), 
sketchFile.getAbsolutePath());
+    PBTreeFileSketchTool.sketchFile(file.getAbsolutePath(), 
sketchFile.getAbsolutePath());
     ISchemaFile sf = SchemaFile.loadSchemaFile(file);
     try {
       StringWriter sw = new StringWriter();
diff --git a/server/src/test/resources/iotdb-datanode.properties 
b/server/src/test/resources/iotdb-datanode.properties
index 2f1b5cd9b49..990090b52cc 100644
--- a/server/src/test/resources/iotdb-datanode.properties
+++ b/server/src/test/resources/iotdb-datanode.properties
@@ -22,7 +22,7 @@ dn_data_dirs=target/data
 dn_wal_dirs=target/wal
 index_root_dir=target/index
 dn_tracing_dir=target/data/tracing
-minimum_schema_file_segment_in_bytes=0
-page_cache_in_schema_file=10
+minimum_pb_tree_segment_in_bytes=0
+page_cache_in_pb_tree=10
 dn_internal_address=0.0.0.0
 dn_sync_dir=target/sync


Reply via email to