This is an automated email from the ASF dual-hosted git repository.
xuanwo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg-rust.git
The following commit(s) were added to refs/heads/main by this push:
     new a796325  fix: ensure S3 and GCS integ tests are conditionally compiled only when the storage-s3 and storage-gcs features are enabled (#552)
a796325 is described below
commit a796325d8b698ca563c8ddec7ca651b3b2a3f99a
Author: Scott Donnelly <[email protected]>
AuthorDate: Fri Aug 16 02:47:45 2024 +0100
    fix: ensure S3 and GCS integ tests are conditionally compiled only when the storage-s3 and storage-gcs features are enabled (#552)
---
crates/iceberg/tests/file_io_gcs_test.rs | 213 ++++++++++++++++---------------
crates/iceberg/tests/file_io_s3_test.rs | 138 ++++++++++----------
2 files changed, 178 insertions(+), 173 deletions(-)
diff --git a/crates/iceberg/tests/file_io_gcs_test.rs b/crates/iceberg/tests/file_io_gcs_test.rs
index 98539e9..540cd9d 100644
--- a/crates/iceberg/tests/file_io_gcs_test.rs
+++ b/crates/iceberg/tests/file_io_gcs_test.rs
@@ -17,109 +17,112 @@
//! Integration tests for FileIO Google Cloud Storage (GCS).
-use std::collections::HashMap;
-use std::net::SocketAddr;
-use std::sync::RwLock;
-
-use bytes::Bytes;
-use ctor::{ctor, dtor};
-use iceberg::io::{FileIO, FileIOBuilder, GCS_NO_AUTH, GCS_SERVICE_PATH};
-use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
-
-static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-static FAKE_GCS_PORT: u16 = 4443;
-static FAKE_GCS_BUCKET: &str = "test-bucket";
-
-#[ctor]
-fn before_all() {
- let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
- let docker_compose = DockerCompose::new(
- normalize_test_name(module_path!()),
- format!("{}/testdata/file_io_gcs", env!("CARGO_MANIFEST_DIR")),
- );
- docker_compose.run();
- guard.replace(docker_compose);
-}
-
-#[dtor]
-fn after_all() {
- let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
- guard.take();
-}
-
-async fn get_file_io_gcs() -> FileIO {
- set_up();
-
- let ip = DOCKER_COMPOSE_ENV
- .read()
- .unwrap()
- .as_ref()
- .unwrap()
- .get_container_ip("gcs-server");
- let addr = SocketAddr::new(ip, FAKE_GCS_PORT);
-
- // A bucket must exist for FileIO
- create_bucket(FAKE_GCS_BUCKET, addr.to_string())
- .await
- .unwrap();
-
- FileIOBuilder::new("gcs")
- .with_props(vec![
- (GCS_SERVICE_PATH, format!("http://{}", addr)),
- (GCS_NO_AUTH, "true".to_string()),
- ])
- .build()
- .unwrap()
-}
-
-// Create a bucket against the emulated GCS storage server.
-async fn create_bucket(name: &str, server_addr: String) -> anyhow::Result<()> {
- let mut bucket_data = HashMap::new();
- bucket_data.insert("name", name);
-
- let client = reqwest::Client::new();
- let endpoint = format!("http://{}/storage/v1/b", server_addr);
- client.post(endpoint).json(&bucket_data).send().await?;
- Ok(())
-}
-
-fn get_gs_path() -> String {
- format!("gs://{}", FAKE_GCS_BUCKET)
-}
-
-#[tokio::test]
-async fn gcs_exists() {
- let file_io = get_file_io_gcs().await;
- assert!(file_io
- .is_exist(format!("{}/", get_gs_path()))
- .await
- .unwrap());
-}
-
-#[tokio::test]
-async fn gcs_write() {
- let gs_file = format!("{}/write-file", get_gs_path());
- let file_io = get_file_io_gcs().await;
- let output = file_io.new_output(&gs_file).unwrap();
- output
- .write(bytes::Bytes::from_static(b"iceberg-gcs!"))
- .await
- .expect("Write to test output file");
- assert!(file_io.is_exist(gs_file).await.unwrap())
-}
-
-#[tokio::test]
-async fn gcs_read() {
- let gs_file = format!("{}/read-gcs", get_gs_path());
- let file_io = get_file_io_gcs().await;
- let output = file_io.new_output(&gs_file).unwrap();
- output
- .write(bytes::Bytes::from_static(b"iceberg!"))
- .await
- .expect("Write to test output file");
- assert!(file_io.is_exist(&gs_file).await.unwrap());
-
- let input = file_io.new_input(gs_file).unwrap();
- assert_eq!(input.read().await.unwrap(), Bytes::from_static(b"iceberg!"));
+#[cfg(all(test, feature = "storage-gcs"))]
+mod tests {
+ use std::collections::HashMap;
+ use std::net::SocketAddr;
+ use std::sync::RwLock;
+
+ use bytes::Bytes;
+ use ctor::{ctor, dtor};
+ use iceberg::io::{FileIO, FileIOBuilder, GCS_NO_AUTH, GCS_SERVICE_PATH};
+ use iceberg_test_utils::docker::DockerCompose;
+ use iceberg_test_utils::{normalize_test_name, set_up};
+
+    static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
+ static FAKE_GCS_PORT: u16 = 4443;
+ static FAKE_GCS_BUCKET: &str = "test-bucket";
+
+ #[ctor]
+ fn before_all() {
+ let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
+ let docker_compose = DockerCompose::new(
+ normalize_test_name(module_path!()),
+ format!("{}/testdata/file_io_gcs", env!("CARGO_MANIFEST_DIR")),
+ );
+ docker_compose.run();
+ guard.replace(docker_compose);
+ }
+
+ #[dtor]
+ fn after_all() {
+ let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
+ guard.take();
+ }
+
+ async fn get_file_io_gcs() -> FileIO {
+ set_up();
+
+ let ip = DOCKER_COMPOSE_ENV
+ .read()
+ .unwrap()
+ .as_ref()
+ .unwrap()
+ .get_container_ip("gcs-server");
+ let addr = SocketAddr::new(ip, FAKE_GCS_PORT);
+
+ // A bucket must exist for FileIO
+ create_bucket(FAKE_GCS_BUCKET, addr.to_string())
+ .await
+ .unwrap();
+
+ FileIOBuilder::new("gcs")
+ .with_props(vec![
+ (GCS_SERVICE_PATH, format!("http://{}", addr)),
+ (GCS_NO_AUTH, "true".to_string()),
+ ])
+ .build()
+ .unwrap()
+ }
+
+ // Create a bucket against the emulated GCS storage server.
+    async fn create_bucket(name: &str, server_addr: String) -> anyhow::Result<()> {
+ let mut bucket_data = HashMap::new();
+ bucket_data.insert("name", name);
+
+ let client = reqwest::Client::new();
+ let endpoint = format!("http://{}/storage/v1/b", server_addr);
+ client.post(endpoint).json(&bucket_data).send().await?;
+ Ok(())
+ }
+
+ fn get_gs_path() -> String {
+ format!("gs://{}", FAKE_GCS_BUCKET)
+ }
+
+ #[tokio::test]
+ async fn gcs_exists() {
+ let file_io = get_file_io_gcs().await;
+ assert!(file_io
+ .is_exist(format!("{}/", get_gs_path()))
+ .await
+ .unwrap());
+ }
+
+ #[tokio::test]
+ async fn gcs_write() {
+ let gs_file = format!("{}/write-file", get_gs_path());
+ let file_io = get_file_io_gcs().await;
+ let output = file_io.new_output(&gs_file).unwrap();
+ output
+ .write(bytes::Bytes::from_static(b"iceberg-gcs!"))
+ .await
+ .expect("Write to test output file");
+ assert!(file_io.is_exist(gs_file).await.unwrap())
+ }
+
+ #[tokio::test]
+ async fn gcs_read() {
+ let gs_file = format!("{}/read-gcs", get_gs_path());
+ let file_io = get_file_io_gcs().await;
+ let output = file_io.new_output(&gs_file).unwrap();
+ output
+ .write(bytes::Bytes::from_static(b"iceberg!"))
+ .await
+ .expect("Write to test output file");
+ assert!(file_io.is_exist(&gs_file).await.unwrap());
+
+ let input = file_io.new_input(gs_file).unwrap();
+        assert_eq!(input.read().await.unwrap(), Bytes::from_static(b"iceberg!"));
+ }
}
diff --git a/crates/iceberg/tests/file_io_s3_test.rs b/crates/iceberg/tests/file_io_s3_test.rs
index 326fdbb..32e2d12 100644
--- a/crates/iceberg/tests/file_io_s3_test.rs
+++ b/crates/iceberg/tests/file_io_s3_test.rs
@@ -16,86 +16,88 @@
// under the License.
//! Integration tests for FileIO S3.
+#[cfg(all(test, feature = "storage-s3"))]
+mod tests {
+ use std::net::SocketAddr;
+ use std::sync::RwLock;
-use std::net::SocketAddr;
-use std::sync::RwLock;
+ use ctor::{ctor, dtor};
+ use iceberg::io::{
+        FileIO, FileIOBuilder, S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY,
+ };
+ use iceberg_test_utils::docker::DockerCompose;
+ use iceberg_test_utils::{normalize_test_name, set_up};
-use ctor::{ctor, dtor};
-use iceberg::io::{
-    FileIO, FileIOBuilder, S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY,
-};
-use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
+ const MINIO_PORT: u16 = 9000;
+    static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-const MINIO_PORT: u16 = 9000;
-static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-
-#[ctor]
-fn before_all() {
- let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
- let docker_compose = DockerCompose::new(
- normalize_test_name(module_path!()),
- format!("{}/testdata/file_io_s3", env!("CARGO_MANIFEST_DIR")),
- );
- docker_compose.run();
- guard.replace(docker_compose);
-}
+ #[ctor]
+ fn before_all() {
+ let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
+ let docker_compose = DockerCompose::new(
+ normalize_test_name(module_path!()),
+ format!("{}/testdata/file_io_s3", env!("CARGO_MANIFEST_DIR")),
+ );
+ docker_compose.run();
+ guard.replace(docker_compose);
+ }
-#[dtor]
-fn after_all() {
- let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
- guard.take();
-}
+ #[dtor]
+ fn after_all() {
+ let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
+ guard.take();
+ }
-async fn get_file_io() -> FileIO {
- set_up();
+ async fn get_file_io() -> FileIO {
+ set_up();
- let guard = DOCKER_COMPOSE_ENV.read().unwrap();
- let docker_compose = guard.as_ref().unwrap();
- let container_ip = docker_compose.get_container_ip("minio");
- let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
+ let guard = DOCKER_COMPOSE_ENV.read().unwrap();
+ let docker_compose = guard.as_ref().unwrap();
+ let container_ip = docker_compose.get_container_ip("minio");
+ let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
- FileIOBuilder::new("s3")
- .with_props(vec![
- (S3_ENDPOINT, format!("http://{}", minio_socket_addr)),
- (S3_ACCESS_KEY_ID, "admin".to_string()),
- (S3_SECRET_ACCESS_KEY, "password".to_string()),
- (S3_REGION, "us-east-1".to_string()),
- ])
- .build()
- .unwrap()
-}
-
-#[tokio::test]
-async fn test_file_io_s3_is_exist() {
- let file_io = get_file_io().await;
- assert!(!file_io.is_exist("s3://bucket2/any").await.unwrap());
- assert!(file_io.is_exist("s3://bucket1/").await.unwrap());
-}
+ FileIOBuilder::new("s3")
+ .with_props(vec![
+ (S3_ENDPOINT, format!("http://{}", minio_socket_addr)),
+ (S3_ACCESS_KEY_ID, "admin".to_string()),
+ (S3_SECRET_ACCESS_KEY, "password".to_string()),
+ (S3_REGION, "us-east-1".to_string()),
+ ])
+ .build()
+ .unwrap()
+ }
-#[tokio::test]
-async fn test_file_io_s3_output() {
- let file_io = get_file_io().await;
- assert!(!file_io.is_exist("s3://bucket1/test_output").await.unwrap());
- let output_file = file_io.new_output("s3://bucket1/test_output").unwrap();
- {
- output_file.write("123".into()).await.unwrap();
+ #[tokio::test]
+ async fn test_file_io_s3_is_exist() {
+ let file_io = get_file_io().await;
+ assert!(!file_io.is_exist("s3://bucket2/any").await.unwrap());
+ assert!(file_io.is_exist("s3://bucket1/").await.unwrap());
}
- assert!(file_io.is_exist("s3://bucket1/test_output").await.unwrap());
-}
-#[tokio::test]
-async fn test_file_io_s3_input() {
- let file_io = get_file_io().await;
- let output_file = file_io.new_output("s3://bucket1/test_input").unwrap();
- {
- output_file.write("test_input".into()).await.unwrap();
+ #[tokio::test]
+ async fn test_file_io_s3_output() {
+ let file_io = get_file_io().await;
+ assert!(!file_io.is_exist("s3://bucket1/test_output").await.unwrap());
+        let output_file = file_io.new_output("s3://bucket1/test_output").unwrap();
+ {
+ output_file.write("123".into()).await.unwrap();
+ }
+ assert!(file_io.is_exist("s3://bucket1/test_output").await.unwrap());
}
- let input_file = file_io.new_input("s3://bucket1/test_input").unwrap();
+ #[tokio::test]
+ async fn test_file_io_s3_input() {
+ let file_io = get_file_io().await;
+        let output_file = file_io.new_output("s3://bucket1/test_input").unwrap();
+ {
+ output_file.write("test_input".into()).await.unwrap();
+ }
+
+ let input_file = file_io.new_input("s3://bucket1/test_input").unwrap();
- {
- let buffer = input_file.read().await.unwrap();
- assert_eq!(buffer, "test_input".as_bytes());
+ {
+ let buffer = input_file.read().await.unwrap();
+ assert_eq!(buffer, "test_input".as_bytes());
+ }
}
}