This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 6e04724f557 [future](merge-cloud) Add rpc fields for cloud (#29995)
6e04724f557 is described below
commit 6e04724f557f3f69cab85b09b44f858c3f8dcd5c
Author: walter <[email protected]>
AuthorDate: Mon Jan 15 21:54:34 2024 +0800
[future](merge-cloud) Add rpc fields for cloud (#29995)
---
be/src/service/backend_service.cpp | 26 +++++++
be/src/service/backend_service.h | 16 ++++
.../org/apache/doris/common/GenericPoolTest.java | 36 +++++++++
.../apache/doris/utframe/MockedBackendFactory.java | 35 +++++++++
gensrc/proto/internal_service.proto | 46 +++++++++++
gensrc/thrift/AgentService.thrift | 22 ++++++
gensrc/thrift/BackendService.thrift | 91 ++++++++++++++++++++++
gensrc/thrift/FrontendService.thrift | 8 ++
gensrc/thrift/HeartbeatService.thrift | 4 +
gensrc/thrift/MasterService.thrift | 3 +
gensrc/thrift/PaloInternalService.thrift | 3 +
11 files changed, 290 insertions(+)
diff --git a/be/src/service/backend_service.cpp
b/be/src/service/backend_service.cpp
index fb98d60dce4..306402eca0d 100644
--- a/be/src/service/backend_service.cpp
+++ b/be/src/service/backend_service.cpp
@@ -885,4 +885,30 @@ void
BackendService::query_ingest_binlog(TQueryIngestBinlogResult& result,
break;
}
}
+
+void BackendService::pre_cache_async(TPreCacheAsyncResponse& response,
+ const TPreCacheAsyncRequest& request) {
+ LOG(FATAL) << "BackendService is not implemented";
+}
+
+void BackendService::check_pre_cache(TCheckPreCacheResponse& response,
+ const TCheckPreCacheRequest& request) {
+ LOG(FATAL) << "BackendService is not implemented";
+}
+
+void BackendService::sync_load_for_tablets(TSyncLoadForTabletsResponse&
response,
+ const TSyncLoadForTabletsRequest&
request) {
+ LOG(FATAL) << "BackendService is not implemented";
+}
+
+void BackendService::get_top_n_hot_partitions(TGetTopNHotPartitionsResponse&
response,
+ const
TGetTopNHotPartitionsRequest& request) {
+ LOG(FATAL) << "BackendService is not implemented";
+}
+
+void BackendService::warm_up_tablets(TWarmUpTabletsResponse& response,
+ const TWarmUpTabletsRequest& request) {
+ LOG(FATAL) << "BackendService is not implemented";
+}
+
} // namespace doris
diff --git a/be/src/service/backend_service.h b/be/src/service/backend_service.h
index 4ee200796a6..fed63454b7b 100644
--- a/be/src/service/backend_service.h
+++ b/be/src/service/backend_service.h
@@ -142,6 +142,22 @@ public:
void query_ingest_binlog(TQueryIngestBinlogResult& result,
const TQueryIngestBinlogRequest& request)
override;
+ void pre_cache_async(TPreCacheAsyncResponse& response,
+ const TPreCacheAsyncRequest& request) override;
+
+ void check_pre_cache(TCheckPreCacheResponse& response,
+ const TCheckPreCacheRequest& request) override;
+
+    // If another cluster loads, FE needs to notify the cluster to sync the load data
+ void sync_load_for_tablets(TSyncLoadForTabletsResponse& response,
+ const TSyncLoadForTabletsRequest& request)
override;
+
+ void get_top_n_hot_partitions(TGetTopNHotPartitionsResponse& response,
+ const TGetTopNHotPartitionsRequest& request)
override;
+
+ void warm_up_tablets(TWarmUpTabletsResponse& response,
+ const TWarmUpTabletsRequest& request) override;
+
private:
Status start_plan_fragment_execution(const TExecPlanFragmentParams&
exec_params);
ExecEnv* _exec_env = nullptr;
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
index ba66d07ec6b..eb9ac858b3e 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java
@@ -23,15 +23,21 @@ import org.apache.doris.thrift.TAgentResult;
import org.apache.doris.thrift.TAgentTaskRequest;
import org.apache.doris.thrift.TCancelPlanFragmentParams;
import org.apache.doris.thrift.TCancelPlanFragmentResult;
+import org.apache.doris.thrift.TCheckPreCacheRequest;
+import org.apache.doris.thrift.TCheckPreCacheResponse;
import org.apache.doris.thrift.TCheckStorageFormatResult;
import org.apache.doris.thrift.TDiskTrashInfo;
import org.apache.doris.thrift.TExecPlanFragmentParams;
import org.apache.doris.thrift.TExecPlanFragmentResult;
import org.apache.doris.thrift.TExportStatusResult;
import org.apache.doris.thrift.TExportTaskRequest;
+import org.apache.doris.thrift.TGetTopNHotPartitionsRequest;
+import org.apache.doris.thrift.TGetTopNHotPartitionsResponse;
import org.apache.doris.thrift.TIngestBinlogRequest;
import org.apache.doris.thrift.TIngestBinlogResult;
import org.apache.doris.thrift.TNetworkAddress;
+import org.apache.doris.thrift.TPreCacheAsyncRequest;
+import org.apache.doris.thrift.TPreCacheAsyncResponse;
import org.apache.doris.thrift.TPublishTopicRequest;
import org.apache.doris.thrift.TPublishTopicResult;
import org.apache.doris.thrift.TQueryIngestBinlogRequest;
@@ -46,10 +52,14 @@ import org.apache.doris.thrift.TScanOpenResult;
import org.apache.doris.thrift.TSnapshotRequest;
import org.apache.doris.thrift.TStatus;
import org.apache.doris.thrift.TStreamLoadRecordResult;
+import org.apache.doris.thrift.TSyncLoadForTabletsRequest;
+import org.apache.doris.thrift.TSyncLoadForTabletsResponse;
import org.apache.doris.thrift.TTabletStatResult;
import org.apache.doris.thrift.TTransmitDataParams;
import org.apache.doris.thrift.TTransmitDataResult;
import org.apache.doris.thrift.TUniqueId;
+import org.apache.doris.thrift.TWarmUpTabletsRequest;
+import org.apache.doris.thrift.TWarmUpTabletsResponse;
import org.apache.doris.utframe.UtFrameUtils;
import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig;
@@ -237,6 +247,32 @@ public class GenericPoolTest {
throws TException {
return null;
}
+
+ @Override
+ public TPreCacheAsyncResponse preCacheAsync(TPreCacheAsyncRequest
request) throws TException {
+ return null;
+ }
+
+ @Override
+ public TCheckPreCacheResponse checkPreCache(TCheckPreCacheRequest
request) throws TException {
+ return null;
+ }
+
+ @Override
+ public TSyncLoadForTabletsResponse
syncLoadForTablets(TSyncLoadForTabletsRequest request) throws TException {
+ return null;
+ }
+
+ @Override
+ public TGetTopNHotPartitionsResponse
getTopNHotPartitions(TGetTopNHotPartitionsRequest request)
+ throws TException {
+ return null;
+ }
+
+ @Override
+ public TWarmUpTabletsResponse warmUpTablets(TWarmUpTabletsRequest
request) throws TException {
+ return null;
+ }
}
@Test
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
index bd1ae4c69e1..afbffaa26c9 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java
@@ -36,6 +36,8 @@ import org.apache.doris.thrift.TBackend;
import org.apache.doris.thrift.TBackendInfo;
import org.apache.doris.thrift.TCancelPlanFragmentParams;
import org.apache.doris.thrift.TCancelPlanFragmentResult;
+import org.apache.doris.thrift.TCheckPreCacheRequest;
+import org.apache.doris.thrift.TCheckPreCacheResponse;
import org.apache.doris.thrift.TCheckStorageFormatResult;
import org.apache.doris.thrift.TCloneReq;
import org.apache.doris.thrift.TDiskTrashInfo;
@@ -46,11 +48,15 @@ import org.apache.doris.thrift.TExportState;
import org.apache.doris.thrift.TExportStatusResult;
import org.apache.doris.thrift.TExportTaskRequest;
import org.apache.doris.thrift.TFinishTaskRequest;
+import org.apache.doris.thrift.TGetTopNHotPartitionsRequest;
+import org.apache.doris.thrift.TGetTopNHotPartitionsResponse;
import org.apache.doris.thrift.THeartbeatResult;
import org.apache.doris.thrift.TIngestBinlogRequest;
import org.apache.doris.thrift.TIngestBinlogResult;
import org.apache.doris.thrift.TMasterInfo;
import org.apache.doris.thrift.TNetworkAddress;
+import org.apache.doris.thrift.TPreCacheAsyncRequest;
+import org.apache.doris.thrift.TPreCacheAsyncResponse;
import org.apache.doris.thrift.TPublishTopicRequest;
import org.apache.doris.thrift.TPublishTopicResult;
import org.apache.doris.thrift.TQueryIngestBinlogRequest;
@@ -67,12 +73,16 @@ import org.apache.doris.thrift.TStatus;
import org.apache.doris.thrift.TStatusCode;
import org.apache.doris.thrift.TStorageMediumMigrateReq;
import org.apache.doris.thrift.TStreamLoadRecordResult;
+import org.apache.doris.thrift.TSyncLoadForTabletsRequest;
+import org.apache.doris.thrift.TSyncLoadForTabletsResponse;
import org.apache.doris.thrift.TTabletInfo;
import org.apache.doris.thrift.TTabletStatResult;
import org.apache.doris.thrift.TTaskType;
import org.apache.doris.thrift.TTransmitDataParams;
import org.apache.doris.thrift.TTransmitDataResult;
import org.apache.doris.thrift.TUniqueId;
+import org.apache.doris.thrift.TWarmUpTabletsRequest;
+import org.apache.doris.thrift.TWarmUpTabletsResponse;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -410,11 +420,36 @@ public class MockedBackendFactory {
return new TCheckStorageFormatResult();
}
+ @Override
+ public TPreCacheAsyncResponse preCacheAsync(TPreCacheAsyncRequest
request) throws TException {
+ return new TPreCacheAsyncResponse();
+ }
+
+ @Override
+ public TCheckPreCacheResponse checkPreCache(TCheckPreCacheRequest
request) throws TException {
+ return new TCheckPreCacheResponse();
+ }
+
@Override
public TIngestBinlogResult ingestBinlog(TIngestBinlogRequest
ingestBinlogRequest) throws TException {
return null;
}
+ @Override
+ public TSyncLoadForTabletsResponse
syncLoadForTablets(TSyncLoadForTabletsRequest request) throws TException {
+ return new TSyncLoadForTabletsResponse();
+ }
+
+ @Override
+ public TGetTopNHotPartitionsResponse
getTopNHotPartitions(TGetTopNHotPartitionsRequest request) throws TException {
+ return new TGetTopNHotPartitionsResponse();
+ }
+
+ @Override
+ public TWarmUpTabletsResponse warmUpTablets(TWarmUpTabletsRequest
request) throws TException {
+ return new TWarmUpTabletsResponse();
+ }
+
@Override
public TQueryIngestBinlogResult
queryIngestBinlog(TQueryIngestBinlogRequest queryIngestBinlogRequest)
throws TException {
diff --git a/gensrc/proto/internal_service.proto
b/gensrc/proto/internal_service.proto
index f197cd162dc..433144b304b 100644
--- a/gensrc/proto/internal_service.proto
+++ b/gensrc/proto/internal_service.proto
@@ -62,6 +62,11 @@ message PTabletWithPartition {
required int64 tablet_id = 2;
}
+message PTabletLoadRowsetInfo {
+ required int32 current_rowset_nums = 1;
+ required int32 max_config_rowset_nums = 2;
+}
+
message PTabletID {
optional int64 partition_id = 1;
optional int64 index_id = 2;
@@ -102,6 +107,7 @@ message PTabletWriterOpenRequest {
message PTabletWriterOpenResult {
required PStatus status = 1;
+ repeated PTabletLoadRowsetInfo tablet_load_rowset_num_infos = 2;
};
// add batch to tablet writer
@@ -183,6 +189,11 @@ message PTabletWriterAddBatchResult {
optional int64 wait_execution_time_us = 5;
repeated PTabletError tablet_errors = 6;
map<int64, PSuccessSlaveTabletNodeIds> success_slave_tablet_node_ids = 7;
+
+ // For cloud
+ optional int64 build_rowset_latency_ms = 1000;
+ optional int64 commit_rowset_latency_ms = 1001;
+ repeated PTabletLoadRowsetInfo tablet_load_rowset_num_infos = 1002;
};
message PTabletWriterAddBlockResult {
@@ -194,6 +205,11 @@ message PTabletWriterAddBlockResult {
repeated PTabletError tablet_errors = 6;
map<int64, PSuccessSlaveTabletNodeIds> success_slave_tablet_node_ids = 7;
optional bytes load_channel_profile = 8;
+
+ // For cloud
+ optional int64 build_rowset_latency_ms = 1000;
+ optional int64 commit_rowset_latency_ms = 1001;
+ repeated PTabletLoadRowsetInfo tablet_load_rowset_num_infos = 1002;
};
// tablet writer cancel
@@ -287,6 +303,10 @@ message PTabletKeyLookupRequest {
optional bytes output_expr = 5;
// return binary mysql row format if true
optional bool is_binary_row = 6;
+
+ // For cloud
+ // version to read
+ optional int64 version = 7;
}
message PTabletKeyLookupResponse {
@@ -698,6 +718,31 @@ message PGetTabletVersionsResponse {
repeated PVersion versions = 2;
};
+message PGetFileCacheMetaRequest {
+ repeated int64 tablet_ids = 1;
+}
+
+enum FileCacheType {
+ TTL = 0;
+ INDEX = 1;
+ NORMAL = 2;
+}
+
+message FileCacheSegmentMeta {
+ required int64 tablet_id = 1;
+ required string rowset_id = 2;
+ required int64 segment_id = 3;
+ required string file_name = 4;
+ required int64 offset = 5;
+ required int64 size = 6;
+ required FileCacheType cache_type = 7;
+ required int64 expiration_time = 8;
+}
+
+message PGetFileCacheMetaResponse {
+ repeated FileCacheSegmentMeta file_cache_segment_metas = 1;
+}
+
message PReportStreamLoadStatusRequest {
optional PUniqueId load_id = 1;
optional PStatus status = 2;
@@ -864,6 +909,7 @@ service PBackendService {
rpc response_slave_tablet_pull_rowset(PTabletWriteSlaveDoneRequest)
returns (PTabletWriteSlaveDoneResult);
rpc fetch_table_schema(PFetchTableSchemaRequest) returns
(PFetchTableSchemaResult);
rpc multiget_data(PMultiGetRequest) returns (PMultiGetResponse);
+ rpc get_file_cache_meta_by_tablet_id(PGetFileCacheMetaRequest) returns
(PGetFileCacheMetaResponse);
rpc tablet_fetch_data(PTabletKeyLookupRequest) returns
(PTabletKeyLookupResponse);
rpc get_column_ids_by_tablet_ids(PFetchColIdsRequest) returns
(PFetchColIdsResponse);
rpc get_tablet_rowset_versions(PGetTabletVersionsRequest) returns
(PGetTabletVersionsResponse);
diff --git a/gensrc/thrift/AgentService.thrift
b/gensrc/thrift/AgentService.thrift
index a1c4020b148..b9b50663c3e 100644
--- a/gensrc/thrift/AgentService.thrift
+++ b/gensrc/thrift/AgentService.thrift
@@ -151,6 +151,10 @@ struct TCreateTabletReq {
24: optional i64 time_series_compaction_file_count_threshold = 2000
25: optional i64 time_series_compaction_time_threshold_seconds = 3600
26: optional i64 time_series_compaction_empty_rowsets_threshold = 5
+
+ // For cloud
+ 1000: optional bool is_in_memory = false
+ 1001: optional bool is_persistent = false
}
struct TDropTabletReq {
@@ -193,6 +197,10 @@ struct TAlterTabletReqV2 {
9: optional Descriptors.TDescriptorTable desc_tbl
10: optional list<Descriptors.TColumn> columns
11: optional i32 be_exec_version = 0
+
+ // For cloud
+ 1000: optional i64 job_id
+ 1001: optional i64 expiration
}
struct TAlterInvertedIndexReq {
@@ -380,6 +388,17 @@ struct TPublishVersionRequest {
3: optional bool strict_mode = false
}
+struct TCalcDeleteBitmapPartitionInfo {
+ 1: required Types.TPartitionId partition_id
+ 2: required Types.TVersion version
+ 3: required list<Types.TTabletId> tablet_ids
+}
+
+struct TCalcDeleteBitmapRequest {
+ 1: required Types.TTransactionId transaction_id
+ 2: required list<TCalcDeleteBitmapPartitionInfo> partitions;
+}
+
struct TClearAlterTaskRequest {
1: required Types.TTabletId tablet_id
2: required Types.TSchemaHash schema_hash
@@ -479,6 +498,9 @@ struct TAgentTaskRequest {
31: optional TPushStoragePolicyReq push_storage_policy_req
32: optional TAlterInvertedIndexReq alter_inverted_index_req
33: optional TGcBinlogReq gc_binlog_req
+
+ // For cloud
+ 1000: optional TCalcDeleteBitmapRequest calc_delete_bitmap_req
}
struct TAgentResult {
diff --git a/gensrc/thrift/BackendService.thrift
b/gensrc/thrift/BackendService.thrift
index dab0b860677..8559698ffd7 100644
--- a/gensrc/thrift/BackendService.thrift
+++ b/gensrc/thrift/BackendService.thrift
@@ -124,6 +124,87 @@ struct TCheckStorageFormatResult {
2: optional list<i64> v2_tablets;
}
+struct TPreCacheAsyncRequest {
+ 1: required string host
+ 2: required i32 brpc_port
+ 3: required list<i64> tablet_ids
+}
+
+struct TPreCacheAsyncResponse {
+ 1: required Status.TStatus status
+}
+
+struct TCheckPreCacheRequest {
+ 1: optional list<i64> tablets
+}
+
+struct TCheckPreCacheResponse {
+ 1: required Status.TStatus status
+ 2: optional map<i64, bool> task_done;
+}
+
+struct TSyncLoadForTabletsRequest {
+ 1: required list<i64> tablet_ids
+}
+
+struct TSyncLoadForTabletsResponse {
+}
+
+struct THotPartition {
+ 1: required i64 partition_id
+ 2: required i64 last_access_time
+ 3: optional i64 query_per_day
+ 4: optional i64 query_per_week
+}
+
+struct THotTableMessage {
+ 1: required i64 table_id
+ 2: required i64 index_id
+ 3: optional list<THotPartition> hot_partitions
+}
+
+struct TGetTopNHotPartitionsRequest {
+}
+
+struct TGetTopNHotPartitionsResponse {
+ 1: required i64 file_cache_size
+ 2: optional list<THotTableMessage> hot_tables
+}
+
+enum TDownloadType {
+ BE = 0,
+ S3 = 1,
+}
+
+enum TWarmUpTabletsRequestType {
+ SET_JOB = 0,
+ SET_BATCH = 1,
+ GET_CURRENT_JOB_STATE_AND_LEASE = 2,
+ CLEAR_JOB = 3,
+}
+
+struct TJobMeta {
+ 1: required TDownloadType download_type
+ 2: optional string be_ip
+ 3: optional i32 brpc_port
+ 4: optional list<i64> tablet_ids
+}
+
+struct TWarmUpTabletsRequest {
+ 1: required i64 job_id
+ 2: required i64 batch_id
+ 3: optional list<TJobMeta> job_metas
+ 4: required TWarmUpTabletsRequestType type
+}
+
+struct TWarmUpTabletsResponse {
+ 1: required Status.TStatus status;
+ 2: optional i64 job_id
+ 3: optional i64 batch_id
+ 4: optional i64 pending_job_size
+ 5: optional i64 finish_job_size
+}
+
struct TIngestBinlogRequest {
1: optional i64 txn_id;
2: optional i64 remote_tablet_id;
@@ -291,6 +372,16 @@ service BackendService {
// check tablet rowset type
TCheckStorageFormatResult check_storage_format();
+ TPreCacheAsyncResponse pre_cache_async(1: TPreCacheAsyncRequest request);
+
+ TCheckPreCacheResponse check_pre_cache(1: TCheckPreCacheRequest request);
+
+ TSyncLoadForTabletsResponse sync_load_for_tablets(1:
TSyncLoadForTabletsRequest request);
+
+ TGetTopNHotPartitionsResponse get_top_n_hot_partitions(1:
TGetTopNHotPartitionsRequest request);
+
+ TWarmUpTabletsResponse warm_up_tablets(1: TWarmUpTabletsRequest request);
+
TIngestBinlogResult ingest_binlog(1: TIngestBinlogRequest
ingest_binlog_request);
TQueryIngestBinlogResult query_ingest_binlog(1: TQueryIngestBinlogRequest
query_ingest_binlog_request);
diff --git a/gensrc/thrift/FrontendService.thrift
b/gensrc/thrift/FrontendService.thrift
index 02d3efe50ed..e19c7e40069 100644
--- a/gensrc/thrift/FrontendService.thrift
+++ b/gensrc/thrift/FrontendService.thrift
@@ -517,6 +517,10 @@ struct TMasterOpRequest {
24: optional bool syncJournalOnly // if set to true, this request means to
do nothing but just sync max journal id of master
25: optional string defaultCatalog
26: optional string defaultDatabase
+
+ // selectdb cloud
+ 1000: optional string cloud_cluster
+ 1001: optional bool noAuth;
}
struct TColumnDefinition {
@@ -673,6 +677,7 @@ struct TStreamLoadPutRequest {
// For cloud
1000: optional string cloud_cluster
+ 1001: optional i64 table_id
}
struct TStreamLoadPutResult {
@@ -785,6 +790,9 @@ struct TLoadTxn2PCRequest {
9: optional string token
10: optional i64 thrift_rpc_timeout_ms
11: optional string label
+
+ // For cloud
+ 1000: optional string auth_code_uuid
}
struct TLoadTxn2PCResult {
diff --git a/gensrc/thrift/HeartbeatService.thrift
b/gensrc/thrift/HeartbeatService.thrift
index 5a7e47d982b..459bc2f8f31 100644
--- a/gensrc/thrift/HeartbeatService.thrift
+++ b/gensrc/thrift/HeartbeatService.thrift
@@ -51,6 +51,10 @@ struct TBackendInfo {
7: optional string be_node_role
8: optional bool is_shutdown
9: optional Types.TPort arrow_flight_sql_port
+
+ // For cloud
+ 1000: optional i64 fragment_executing_count
+ 1001: optional i64 fragment_last_active_time
}
struct THeartbeatResult {
diff --git a/gensrc/thrift/MasterService.thrift
b/gensrc/thrift/MasterService.thrift
index 9acd3f85f7b..fb5dd416bd1 100644
--- a/gensrc/thrift/MasterService.thrift
+++ b/gensrc/thrift/MasterService.thrift
@@ -46,6 +46,9 @@ struct TTabletInfo {
// 18: optional bool is_cooldown
19: optional i64 cooldown_term
20: optional Types.TUniqueId cooldown_meta_id
+
+ // For cloud
+ 1000: optional bool is_persistent
}
struct TFinishTaskRequest {
diff --git a/gensrc/thrift/PaloInternalService.thrift
b/gensrc/thrift/PaloInternalService.thrift
index 7559451e373..fb9a1888f6c 100644
--- a/gensrc/thrift/PaloInternalService.thrift
+++ b/gensrc/thrift/PaloInternalService.thrift
@@ -649,6 +649,9 @@ struct TCondition {
// using unique id to distinguish them
4: optional i32 column_unique_id
5: optional bool marked_by_runtime_filter = false
+
+ // For cloud
+ 1000: optional TCompoundType compound_type = TCompoundType.UNKNOWN
}
struct TExportStatusResult {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]