This is an automated email from the ASF dual-hosted git repository.

xuanwo pushed a commit to branch fix-stat
in repository https://gitbox.apache.org/repos/asf/incubator-opendal.git

commit 744c5cf23bdc234228826922370f4027116f524d
Author: Xuanwo <[email protected]>
AuthorDate: Tue Nov 21 17:31:03 2023 +0800

    Implement for s3
    
    Signed-off-by: Xuanwo <[email protected]>
---
 core/src/services/s3/backend.rs |  29 ++++++++--
 core/src/services/s3/core.rs    | 109 +++++++++++++++++++++++++++++++++++++
 core/src/services/s3/lister.rs  | 117 +---------------------------------------
 3 files changed, 136 insertions(+), 119 deletions(-)

diff --git a/core/src/services/s3/backend.rs b/core/src/services/s3/backend.rs
index 56663f522..bd87d726e 100644
--- a/core/src/services/s3/backend.rs
+++ b/core/src/services/s3/backend.rs
@@ -1102,6 +1102,32 @@ impl Accessor for S3Backend {
             return Ok(RpStat::new(Metadata::new(EntryMode::DIR)));
         }
 
+        if path.ends_with('/') {
+            let resp = self
+                .core
+                .s3_list_objects(path, "", "", Some(1), None)
+                .await?;
+
+            let status = resp.status();
+
+            return match status {
+                StatusCode::OK => {
+                    let bs = resp.into_body().bytes().await?;
+                    let output: Output = quick_xml::de::from_reader(bs.reader())
+                        .map_err(new_xml_deserialize_error)?;
+                    if !output.contents.is_empty() {
+                        Ok(RpStat::new(Metadata::new(EntryMode::DIR)))
+                    } else {
+                        Err(
+                            Error::new(ErrorKind::NotFound, "The directory is not found")
+                                .with_context("path", path),
+                        )
+                    }
+                }
+                _ => Err(parse_error(resp).await?),
+            };
+        }
+
         let resp = self
             .core
             .s3_head_object(path, args.if_none_match(), args.if_match())
@@ -1111,9 +1137,6 @@ impl Accessor for S3Backend {
 
         match status {
             StatusCode::OK => parse_into_metadata(path, resp.headers()).map(RpStat::new),
-            StatusCode::NOT_FOUND if path.ends_with('/') => {
-                Ok(RpStat::new(Metadata::new(EntryMode::DIR)))
-            }
             _ => Err(parse_error(resp).await?),
         }
     }
diff --git a/core/src/services/s3/core.rs b/core/src/services/s3/core.rs
index 1021399e4..66dbfce72 100644
--- a/core/src/services/s3/core.rs
+++ b/core/src/services/s3/core.rs
@@ -761,6 +761,40 @@ pub struct DeleteObjectsResultError {
     pub message: String,
 }
 
+/// Output of ListBucket/ListObjects.
+///
+/// ## Note
+///
+/// Use `Option` in `is_truncated` and `next_continuation_token` to make
+/// the behavior more clear so that we can be compatible to more s3 services.
+///
+/// And enable `serde(default)` so that we can keep going even when some field
+/// is not exist.
+#[derive(Default, Debug, Deserialize)]
+#[serde(default, rename_all = "PascalCase")]
+pub struct Output {
+    pub is_truncated: Option<bool>,
+    pub next_continuation_token: Option<String>,
+    pub common_prefixes: Vec<OutputCommonPrefix>,
+    pub contents: Vec<OutputContent>,
+}
+
+#[derive(Default, Debug, Eq, PartialEq, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct OutputContent {
+    pub key: String,
+    pub size: u64,
+    pub last_modified: String,
+    #[serde(rename = "ETag")]
+    pub etag: Option<String>,
+}
+
+#[derive(Default, Debug, Eq, PartialEq, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct OutputCommonPrefix {
+    pub prefix: String,
+}
+
 #[cfg(test)]
 mod tests {
     use bytes::Buf;
@@ -892,4 +926,79 @@ mod tests {
         assert_eq!(out.error[0].code, "AccessDenied");
         assert_eq!(out.error[0].message, "Access Denied");
     }
+
+    #[test]
+    fn test_parse_list_output() {
+        let bs = bytes::Bytes::from(
+            r#"<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Name>example-bucket</Name>
+  <Prefix>photos/2006/</Prefix>
+  <KeyCount>3</KeyCount>
+  <MaxKeys>1000</MaxKeys>
+  <Delimiter>/</Delimiter>
+  <IsTruncated>false</IsTruncated>
+  <Contents>
+    <Key>photos/2006</Key>
+    <LastModified>2016-04-30T23:51:29.000Z</LastModified>
+    <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
+    <Size>56</Size>
+    <StorageClass>STANDARD</StorageClass>
+  </Contents>
+  <Contents>
+    <Key>photos/2007</Key>
+    <LastModified>2016-04-30T23:51:29.000Z</LastModified>
+    <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
+    <Size>100</Size>
+    <StorageClass>STANDARD</StorageClass>
+  </Contents>
+  <Contents>
+    <Key>photos/2008</Key>
+    <LastModified>2016-05-30T23:51:29.000Z</LastModified>
+    <Size>42</Size>
+  </Contents>
+
+  <CommonPrefixes>
+    <Prefix>photos/2006/February/</Prefix>
+  </CommonPrefixes>
+  <CommonPrefixes>
+    <Prefix>photos/2006/January/</Prefix>
+  </CommonPrefixes>
+</ListBucketResult>"#,
+        );
+
+        let out: Output = quick_xml::de::from_reader(bs.reader()).expect("must success");
+
+        assert!(!out.is_truncated.unwrap());
+        assert!(out.next_continuation_token.is_none());
+        assert_eq!(
+            out.common_prefixes
+                .iter()
+                .map(|v| v.prefix.clone())
+                .collect::<Vec<String>>(),
+            vec!["photos/2006/February/", "photos/2006/January/"]
+        );
+        assert_eq!(
+            out.contents,
+            vec![
+                OutputContent {
+                    key: "photos/2006".to_string(),
+                    size: 56,
+                    etag: Some("\"d41d8cd98f00b204e9800998ecf8427e\"".to_string()),
+                    last_modified: "2016-04-30T23:51:29.000Z".to_string(),
+                },
+                OutputContent {
+                    key: "photos/2007".to_string(),
+                    size: 100,
+                    last_modified: "2016-04-30T23:51:29.000Z".to_string(),
+                    etag: Some("\"d41d8cd98f00b204e9800998ecf8427e\"".to_string()),
+                },
+                OutputContent {
+                    key: "photos/2008".to_string(),
+                    size: 42,
+                    last_modified: "2016-05-30T23:51:29.000Z".to_string(),
+                    etag: None,
+                },
+            ]
+        )
+    }
 }
diff --git a/core/src/services/s3/lister.rs b/core/src/services/s3/lister.rs
index 78d5e1ff3..cab972c1f 100644
--- a/core/src/services/s3/lister.rs
+++ b/core/src/services/s3/lister.rs
@@ -20,9 +20,8 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use bytes::Buf;
 use quick_xml::de;
-use serde::Deserialize;
 
-use super::core::S3Core;
+use super::core::{Output, S3Core};
 use super::error::parse_error;
 use crate::raw::*;
 use crate::EntryMode;
@@ -138,117 +137,3 @@ impl oio::PageList for S3Lister {
         Ok(())
     }
 }
-
-/// Output of ListBucket/ListObjects.
-///
-/// ## Note
-///
-/// Use `Option` in `is_truncated` and `next_continuation_token` to make
-/// the behavior more clear so that we can be compatible to more s3 services.
-///
-/// And enable `serde(default)` so that we can keep going even when some field
-/// is not exist.
-#[derive(Default, Debug, Deserialize)]
-#[serde(default, rename_all = "PascalCase")]
-struct Output {
-    is_truncated: Option<bool>,
-    next_continuation_token: Option<String>,
-    common_prefixes: Vec<OutputCommonPrefix>,
-    contents: Vec<OutputContent>,
-}
-
-#[derive(Default, Debug, Eq, PartialEq, Deserialize)]
-#[serde(rename_all = "PascalCase")]
-struct OutputContent {
-    key: String,
-    size: u64,
-    last_modified: String,
-    #[serde(rename = "ETag")]
-    etag: Option<String>,
-}
-
-#[derive(Default, Debug, Eq, PartialEq, Deserialize)]
-#[serde(rename_all = "PascalCase")]
-struct OutputCommonPrefix {
-    prefix: String,
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parse_list_output() {
-        let bs = bytes::Bytes::from(
-            r#"<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Name>example-bucket</Name>
-  <Prefix>photos/2006/</Prefix>
-  <KeyCount>3</KeyCount>
-  <MaxKeys>1000</MaxKeys>
-  <Delimiter>/</Delimiter>
-  <IsTruncated>false</IsTruncated>
-  <Contents>
-    <Key>photos/2006</Key>
-    <LastModified>2016-04-30T23:51:29.000Z</LastModified>
-    <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
-    <Size>56</Size>
-    <StorageClass>STANDARD</StorageClass>
-  </Contents>
-  <Contents>
-    <Key>photos/2007</Key>
-    <LastModified>2016-04-30T23:51:29.000Z</LastModified>
-    <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
-    <Size>100</Size>
-    <StorageClass>STANDARD</StorageClass>
-  </Contents>
-  <Contents>
-    <Key>photos/2008</Key>
-    <LastModified>2016-05-30T23:51:29.000Z</LastModified>
-    <Size>42</Size>
-  </Contents>
-
-  <CommonPrefixes>
-    <Prefix>photos/2006/February/</Prefix>
-  </CommonPrefixes>
-  <CommonPrefixes>
-    <Prefix>photos/2006/January/</Prefix>
-  </CommonPrefixes>
-</ListBucketResult>"#,
-        );
-
-        let out: Output = de::from_reader(bs.reader()).expect("must success");
-
-        assert!(!out.is_truncated.unwrap());
-        assert!(out.next_continuation_token.is_none());
-        assert_eq!(
-            out.common_prefixes
-                .iter()
-                .map(|v| v.prefix.clone())
-                .collect::<Vec<String>>(),
-            vec!["photos/2006/February/", "photos/2006/January/"]
-        );
-        assert_eq!(
-            out.contents,
-            vec![
-                OutputContent {
-                    key: "photos/2006".to_string(),
-                    size: 56,
-                    etag: Some("\"d41d8cd98f00b204e9800998ecf8427e\"".to_string()),
-                    last_modified: "2016-04-30T23:51:29.000Z".to_string(),
-                },
-                OutputContent {
-                    key: "photos/2007".to_string(),
-                    size: 100,
-                    last_modified: "2016-04-30T23:51:29.000Z".to_string(),
-                    etag: Some("\"d41d8cd98f00b204e9800998ecf8427e\"".to_string()),
-                },
-                OutputContent {
-                    key: "photos/2008".to_string(),
-                    size: 42,
-                    last_modified: "2016-05-30T23:51:29.000Z".to_string(),
-                    etag: None,
-                },
-            ]
-        )
-    }
-}

Reply via email to