luoyuxia commented on code in PR #173:
URL: https://github.com/apache/fluss-rust/pull/173#discussion_r2724827843
##########
bindings/cpp/src/types.rs:
##########
@@ -478,3 +478,220 @@ pub fn core_lake_snapshot_to_ffi(snapshot: &fcore::metadata::LakeSnapshot) -> ff
         bucket_offsets,
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use arrow::array::{
+        BinaryArray, BooleanArray, Date32Array, FixedSizeBinaryArray, Float32Array, Float64Array,
+        Int32Array, Int64Array, LargeBinaryArray, LargeStringArray, RecordBatch,
+        Time32MillisecondArray, Time64MicrosecondArray, TimestampMillisecondArray,
+    };
+    use arrow::datatypes::{DataType, Field, Schema, TimeUnit};
+    use fcore::record::{ChangeType, ScanRecord, ScanRecords};
+    use fcore::row::{ColumnarRow, InternalRow};
+    use std::collections::HashMap;
+    use std::sync::Arc;
+
+    fn make_ffi_datum(datum_type: i32) -> ffi::FfiDatum {
+        ffi::FfiDatum {
+            datum_type,
+            bool_val: false,
+            i32_val: 0,
+            i64_val: 0,
+            f32_val: 0.0,
+            f64_val: 0.0,
+            string_val: String::new(),
+            bytes_val: vec![],
+        }
+    }
+
+    #[test]
+    fn ffi_descriptor_to_core_rejects_invalid_type() {
+        let descriptor = ffi::FfiTableDescriptor {
+            schema: ffi::FfiSchema {
+                columns: vec![ffi::FfiColumn {
+                    name: "bad".to_string(),
+                    data_type: 999,
Review Comment:
nit: add a comment:
`999` is not a valid data type.
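
For illustration, the nit could be addressed with a one-line comment above the field (a sketch of the suggested comment only, not final wording):

    // `999` is deliberately not a valid data type id, so the
    // conversion is expected to be rejected.
    data_type: 999,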
##########
crates/fluss/src/client/connection.rs:
##########
@@ -85,3 +103,129 @@ impl FlussConnection {
         Ok(FlussTable::new(self, self.metadata.clone(), table_info))
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::cluster::{Cluster, ServerNode, ServerType};
+    use crate::metadata::{DataField, DataTypes, Schema, TableDescriptor, TableInfo, TablePath};
+    use crate::test_utils::build_mock_connection;
+    use std::collections::HashMap;
+
+    fn build_cluster() -> Arc<Cluster> {
+        let coordinator = ServerNode::new(
+            1,
+            "127.0.0.1".to_string(),
+            9092,
+            ServerType::CoordinatorServer,
+        );
+
+        let table_path = TablePath::new("db".to_string(), "tbl".to_string());
+        let row_type = DataTypes::row(vec![DataField::new(
+            "id".to_string(),
+            DataTypes::int(),
+            None,
+        )]);
+        let mut schema_builder = Schema::builder().with_row_type(&row_type);
+        let schema = schema_builder.build().expect("schema");
+        let descriptor = TableDescriptor::builder()
+            .schema(schema)
+            .distributed_by(Some(1), vec![])
+            .build()
+            .expect("descriptor");
+        let table_info = TableInfo::of(table_path.clone(), 1, 1, descriptor, 0, 0);
+
+        let mut table_id_by_path = HashMap::new();
+        table_id_by_path.insert(table_path.clone(), 1);
+
+        let mut table_info_by_path = HashMap::new();
+        table_info_by_path.insert(table_path, table_info);
+
+        Arc::new(Cluster::new(
+            Some(coordinator),
+            HashMap::new(),
+            HashMap::new(),
+            HashMap::new(),
+            table_id_by_path,
+            table_info_by_path,
+        ))
+    }
+
+    #[tokio::test]
+    async fn get_or_create_writer_client_is_cached() -> Result<()> {
Review Comment:
I’m wondering if we really need these tests. Most of them seem to verify
very trivial logic (like basic getters and simple caching) that is unlikely to
break. I'm concerned they might just increase our maintenance burden without
providing much validation value.
##########
crates/fluss/src/client/admin.rs:
##########
@@ -324,3 +324,225 @@ impl FlussAdmin {
         Ok(tasks)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::cluster::{ServerNode, ServerType};
+    use crate::metadata::{
+        DataField, DataTypes, DatabaseDescriptor, JsonSerde, Schema, TableDescriptor, TablePath,
+    };
+    use crate::proto::{
+        CreateDatabaseResponse, CreateTableResponse, DatabaseExistsResponse, DropDatabaseResponse,
+        DropTableResponse, GetDatabaseInfoResponse, GetLatestLakeSnapshotResponse,
+        GetTableInfoResponse, ListDatabasesResponse, ListOffsetsResponse, ListTablesResponse,
+        PbLakeSnapshotForBucket, PbListOffsetsRespForBucket, TableExistsResponse,
+    };
+    use crate::test_utils::{build_cluster_with_coordinator_arc, build_mock_connection};
+    use prost::Message;
+    use std::sync::Arc;
+
+    const API_CREATE_DATABASE: i16 = 1001;
+    const API_DROP_DATABASE: i16 = 1002;
+    const API_LIST_DATABASES: i16 = 1003;
+    const API_DATABASE_EXISTS: i16 = 1004;
+    const API_CREATE_TABLE: i16 = 1005;
+    const API_DROP_TABLE: i16 = 1006;
+    const API_GET_TABLE: i16 = 1007;
+    const API_LIST_TABLES: i16 = 1008;
+    const API_TABLE_EXISTS: i16 = 1010;
+    const API_LIST_OFFSETS: i16 = 1021;
+    const API_GET_LAKE_SNAPSHOT: i16 = 1032;
+    const API_GET_DATABASE_INFO: i16 = 1035;
+
+    fn build_table_descriptor() -> TableDescriptor {
+        let row_type = DataTypes::row(vec![
+            DataField::new("id".to_string(), DataTypes::int(), None),
+            DataField::new("name".to_string(), DataTypes::string(), None),
+        ]);
+        let mut schema_builder = Schema::builder().with_row_type(&row_type);
+        let schema = schema_builder.build().expect("schema");
+        TableDescriptor::builder()
+            .schema(schema)
+            .distributed_by(Some(1), vec![])
+            .build()
+            .expect("descriptor")
+    }
+
+    #[tokio::test]
+    async fn admin_requests_round_trip() -> Result<()> {
Review Comment:
Could we cover any missing methods directly in an IT case instead? The current
tests don't look like they touch the core code paths that actually need testing.
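
For example, a rough sketch of what such an IT case could look like (the
harness helper `start_fluss_cluster` and the exact admin method names here are
hypothetical, not the project's actual IT API):

    #[tokio::test]
    async fn create_and_get_table_it_round_trip() -> Result<()> {
        // Hypothetical harness that boots a real coordinator + tablet server.
        let cluster = start_fluss_cluster().await?;
        let conn = FlussConnection::connect(&cluster.bootstrap_servers()).await?;
        let admin = conn.get_admin().await?;

        let table_path = TablePath::new("it_db".to_string(), "it_tbl".to_string());
        admin.create_table(&table_path, &build_table_descriptor(), false).await?;

        // Goes through real serialization and a real server, covering the
        // code path that the mocked round-trip tests skip.
        let table_info = admin.get_table_info(&table_path).await?;
        assert_eq!(table_info.table_path(), &table_path);
        Ok(())
    }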
##########
crates/fluss/src/client/metadata.rs:
##########
@@ -165,13 +165,137 @@ impl Metadata {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::metadata::{TableBucket, TablePath};
-    use crate::test_utils::build_cluster_arc;
+    use crate::cluster::{BucketLocation, Cluster, ServerNode, ServerType};
+    use crate::metadata::{
+        DataField, DataTypes, JsonSerde, Schema, TableBucket, TableDescriptor, TableInfo, TablePath,
+    };
+    use crate::proto::{
+        MetadataResponse, PbBucketMetadata, PbServerNode, PbTableMetadata, PbTablePath,
+    };
+    use crate::test_utils::build_mock_connection;
+    use prost::Message;
+    use std::collections::{HashMap, HashSet};
+    use std::sync::Arc;
+
+    const API_UPDATE_METADATA: i16 = 1012;
+
+    fn build_table_info(table_path: TablePath, table_id: i64) -> TableInfo {
+        let row_type = DataTypes::row(vec![DataField::new(
+            "id".to_string(),
+            DataTypes::int(),
+            None,
+        )]);
+        let mut schema_builder = Schema::builder().with_row_type(&row_type);
+        let schema = schema_builder.build().expect("schema build");
+        let table_descriptor = TableDescriptor::builder()
+            .schema(schema)
+            .distributed_by(Some(1), vec![])
+            .build()
+            .expect("descriptor build");
+        TableInfo::of(table_path, table_id, 1, table_descriptor, 0, 0)
+    }
+
+    fn build_cluster(table_path: &TablePath, table_id: i64) -> Arc<Cluster> {
Review Comment:
ditto
##########
crates/fluss/src/client/metadata.rs:
##########
@@ -165,13 +165,137 @@ impl Metadata {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::metadata::{TableBucket, TablePath};
-    use crate::test_utils::build_cluster_arc;
+    use crate::cluster::{BucketLocation, Cluster, ServerNode, ServerType};
+    use crate::metadata::{
+        DataField, DataTypes, JsonSerde, Schema, TableBucket, TableDescriptor, TableInfo, TablePath,
+    };
+    use crate::proto::{
+        MetadataResponse, PbBucketMetadata, PbServerNode, PbTableMetadata, PbTablePath,
+    };
+    use crate::test_utils::build_mock_connection;
+    use prost::Message;
+    use std::collections::{HashMap, HashSet};
+    use std::sync::Arc;
+
+    const API_UPDATE_METADATA: i16 = 1012;
+
+    fn build_table_info(table_path: TablePath, table_id: i64) -> TableInfo {
Review Comment:
Can this be replaced with the helper methods in `test_utils`?
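
For instance, if `test_utils` already exposes (or gains) a shared table-info
builder, the module-local helper could be dropped in favor of it. A sketch,
assuming a hypothetical `build_table_info` export in `test_utils`:

    use crate::test_utils::build_table_info;

    // Reuse the shared fixture instead of duplicating the builder locally.
    let table_info = build_table_info(table_path.clone(), table_id);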
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]