This is an automated email from the ASF dual-hosted git repository.

xuanwo pushed a commit to branch rename-to-azfile
in repository https://gitbox.apache.org/repos/asf/incubator-opendal.git

commit 71a4b5ad6067ee2c93bfbbe322ed1ecc1774618f
Author: Xuanwo <[email protected]>
AuthorDate: Wed Sep 13 13:40:13 2023 +0800

    fix: Correct the name of azdfs to azdls
    
    Signed-off-by: Xuanwo <[email protected]>
---
 .github/workflows/service_test_azdfs.yml           |  20 ++---
 CHANGELOG.md                                       |  10 +--
 README.md                                          |   4 +-
 bin/oli/README.md                                  |   2 +-
 bin/oli/src/config/mod.rs                          |   4 +-
 bindings/java/Cargo.toml                           |   2 +-
 core/Cargo.toml                                    |   4 +-
 core/README.md                                     |   4 +-
 core/src/services/{azdfs => azdls}/backend.rs      | 100 ++++++++++-----------
 core/src/services/{azdfs => azdls}/core.rs         |  28 +++---
 core/src/services/{azdfs => azdls}/docs.md         |  12 ++-
 core/src/services/{azdfs => azdls}/error.rs        |  12 +--
 core/src/services/{azdfs => azdls}/mod.rs          |   2 +-
 core/src/services/{azdfs => azdls}/pager.rs        |  20 ++---
 core/src/services/{azdfs => azdls}/writer.rs       |  22 ++---
 core/src/services/mod.rs                           |   8 +-
 core/src/types/operator/builder.rs                 |   4 +-
 core/src/types/scheme.rs                           |  12 ++-
 core/tests/behavior/main.rs                        |   4 +-
 .../index.md                                       |   2 +-
 website/docs/services/{azdfs.mdx => azdls.mdx}     |  10 +--
 .../HomepageFeatures/_feature_services.mdx         |   2 +-
 22 files changed, 148 insertions(+), 140 deletions(-)

diff --git a/.github/workflows/service_test_azdfs.yml 
b/.github/workflows/service_test_azdfs.yml
index e65f90fda..fd1405f75 100644
--- a/.github/workflows/service_test_azdfs.yml
+++ b/.github/workflows/service_test_azdfs.yml
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-name: Service Test Azdfs
+name: Service Test Azdls
 
 on:
   push:
@@ -29,15 +29,15 @@ on:
       - "core/tests/**"
       - "!core/src/docs/**"
       - "!core/src/services/**"
-      - "core/src/services/azdfs/**"
-      - ".github/workflows/service_test_azdfs.yml"
+      - "core/src/services/azdls/**"
+      - ".github/workflows/service_test_azdfs.yml"
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}
   cancel-in-progress: true
 
 jobs:
-  azure_azdfs:
+  azure_azdls:
     runs-on: ubuntu-latest
     if: github.event_name == 'push' || 
!github.event.pull_request.head.repo.fork
     steps:
@@ -49,10 +49,10 @@ jobs:
       - name: Test
         shell: bash
         working-directory: core
-        run: cargo nextest run azdfs
+        run: cargo nextest run azdls
         env:
-          OPENDAL_AZDFS_TEST: ${{ secrets.OPENDAL_AZDFS_TEST }}
-          OPENDAL_AZDFS_FILESYSTEM: ${{ secrets.OPENDAL_AZDFS_FILESYSTEM }}
-          OPENDAL_AZDFS_ENDPOINT: ${{ secrets.OPENDAL_AZDFS_ENDPOINT }}
-          OPENDAL_AZDFS_ACCOUNT_NAME: ${{ secrets.OPENDAL_AZDFS_ACCOUNT_NAME }}
-          OPENDAL_AZDFS_ACCOUNT_KEY: ${{ secrets.OPENDAL_AZDFS_ACCOUNT_KEY }}
+          OPENDAL_AZDLS_TEST: ${{ secrets.OPENDAL_AZDLS_TEST }}
+          OPENDAL_AZDLS_FILESYSTEM: ${{ secrets.OPENDAL_AZDLS_FILESYSTEM }}
+          OPENDAL_AZDLS_ENDPOINT: ${{ secrets.OPENDAL_AZDLS_ENDPOINT }}
+          OPENDAL_AZDLS_ACCOUNT_NAME: ${{ secrets.OPENDAL_AZDLS_ACCOUNT_NAME }}
+          OPENDAL_AZDLS_ACCOUNT_KEY: ${{ secrets.OPENDAL_AZDLS_ACCOUNT_KEY }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0a42062d3..20359a84b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -336,7 +336,7 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/).
 
 ### Docs
 
-- docs: add service doc for azdfs (#2310)
+- docs: add service doc for azdls (#2310)
 - docs(bidnings/java): how to deploy snapshots (#2311)
 - docs(bidnings/java): how to deploy snapshots (#2311)
 - docs: Fixed links of languages to open in same tab (#2327)
@@ -648,7 +648,7 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/).
 
 ### Added
 
-- feat(services/azdfs): support rename (#1929)
+- feat(services/azdls): support rename (#1929)
 - test: Increate copy/move nested path test depth (#1932)
 - feat(layers): add a basic minitrace layer (#1931)
 - feat: add Writer::abort method (#1937)
@@ -735,7 +735,7 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/).
 - refactor: Remove not used blocking http client (#1895)
 - refactor: Change presign to async for future refactor (#1900)
 - refactor(services/gcs): Migrate to async reqsign (#1906)
-- refactor(services/azdfs): Migrate to async reqsign (#1903)
+- refactor(services/azdls): Migrate to async reqsign (#1903)
 - refactor(services/azblob): Adopt new reqsign (#1902)
 - refactor(services/s3): Migrate to async reqsign (#1909)
 - refactor(services/oss): Migrate to async reqsign (#1911)
@@ -1502,12 +1502,12 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/).
 
 ### Added
 
-- feat: Add azdfs support (#1009)
+- feat: Add azdls support (#1009)
 - feat: Set MSRV of opendal to 1.60 (#1012)
 
 ### Docs
 
-- docs: Fix docs for azdfs service (#1010)
+- docs: Fix docs for azdls service (#1010)
 
 ## [v0.21.1] - 2022-11-26
 
diff --git a/README.md b/README.md
index ecec40478..d02cec624 100644
--- a/README.md
+++ b/README.md
@@ -51,10 +51,10 @@ Major components of the project include:
 </details>
 
 <details>
-<summary>File Storage Services (like fs, azdfs, hdfs)</summary>
+<summary>File Storage Services (like fs, azdls, hdfs)</summary>
 
 - fs: POSIX alike file system
-- azdfs: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
+- azdls: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
 - hdfs: [Hadoop Distributed File 
System](https://hadoop.apache.org/docs/r3.3.4/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)(HDFS)
 - ipfs: [InterPlanetary File System](https://ipfs.tech/) HTTP Gateway
 - ipmfs: [InterPlanetary File System](https://ipfs.tech/) MFS API *being 
worked on*
diff --git a/bin/oli/README.md b/bin/oli/README.md
index 6e0e37dce..2845b6a00 100644
--- a/bin/oli/README.md
+++ b/bin/oli/README.md
@@ -2,7 +2,7 @@
 
 ## What `oli` is
 
-`oli` stands for OpenDAL Command Line Interface. It aims to provide a unified 
and user-friendly way to manipulate data stored in various storage services 
such as Object storage services(s3, azblob, gcs, minio, etc.), Key-Value 
services(Redis, etcd, TiKV, etc.), Filesystem-like service(HDFS, Azdfs, etc.), 
and also [all the other supported 
services](https://opendal.apache.org/docs/category/services).
+`oli` stands for OpenDAL Command Line Interface. It aims to provide a unified 
and user-friendly way to manipulate data stored in various storage services 
such as Object storage services(s3, azblob, gcs, minio, etc.), Key-Value 
services(Redis, etcd, TiKV, etc.), Filesystem-like service(HDFS, Azdls, etc.), 
and also [all the other supported 
services](https://opendal.apache.org/docs/category/services).
 
 ## How to use `oli`
 
diff --git a/bin/oli/src/config/mod.rs b/bin/oli/src/config/mod.rs
index 40dae693d..08d2cfb6b 100644
--- a/bin/oli/src/config/mod.rs
+++ b/bin/oli/src/config/mod.rs
@@ -166,8 +166,8 @@ impl Config {
                 
Operator::from_map::<services::Azblob>(profile.clone())?.finish(),
                 path,
             )),
-            Scheme::Azdfs => Ok((
-                
Operator::from_map::<services::Azdfs>(profile.clone())?.finish(),
+            Scheme::Azdls => Ok((
+                
Operator::from_map::<services::Azdls>(profile.clone())?.finish(),
                 path,
             )),
             #[cfg(feature = "services-dashmap")]
diff --git a/bindings/java/Cargo.toml b/bindings/java/Cargo.toml
index 7d94cd71b..ee094eda4 100644
--- a/bindings/java/Cargo.toml
+++ b/bindings/java/Cargo.toml
@@ -42,7 +42,7 @@ tokio = { version = "1.28.1", features = ["full"] }
 workspace = true
 features = [
   "services-azblob",
-  "services-azdfs",
+  "services-azdls",
   "services-cacache",
   "services-cos",
   "services-dashmap",
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 74e9512ab..daf6201d8 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -37,7 +37,7 @@ all-features = true
 default = [
   "rustls",
   "services-azblob",
-  "services-azdfs",
+  "services-azdls",
   "services-cos",
   "services-fs",
   "services-gcs",
@@ -106,7 +106,7 @@ services-azblob = [
   "reqsign?/services-azblob",
   "reqsign?/reqwest_request",
 ]
-services-azdfs = [
+services-azdls = [
   "dep:reqsign",
   "reqsign?/services-azblob",
   "reqsign?/reqwest_request",
diff --git a/core/README.md b/core/README.md
index 70fe9a3e8..3352e935d 100644
--- a/core/README.md
+++ b/core/README.md
@@ -42,10 +42,10 @@
 </details>
 
 <details>
-<summary>File Storage Services (like fs, azdfs, hdfs)</summary>
+<summary>File Storage Services (like fs, azdls, hdfs)</summary>
 
 - fs: POSIX alike file system
-- azdfs: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
+- azdls: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
 - hdfs: [Hadoop Distributed File 
System](https://hadoop.apache.org/docs/r3.3.4/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)(HDFS)
 - ipfs: [InterPlanetary File System](https://ipfs.tech/) HTTP Gateway
 - ipmfs: [InterPlanetary File System](https://ipfs.tech/) MFS API *being 
worked on*
diff --git a/core/src/services/azdfs/backend.rs 
b/core/src/services/azdls/backend.rs
similarity index 83%
rename from core/src/services/azdfs/backend.rs
rename to core/src/services/azdls/backend.rs
index c729ab754..e0bcdb3fd 100644
--- a/core/src/services/azdfs/backend.rs
+++ b/core/src/services/azdls/backend.rs
@@ -27,10 +27,10 @@ use reqsign::AzureStorageConfig;
 use reqsign::AzureStorageLoader;
 use reqsign::AzureStorageSigner;
 
-use super::core::AzdfsCore;
+use super::core::AzdlsCore;
 use super::error::parse_error;
-use super::pager::AzdfsPager;
-use super::writer::AzdfsWriter;
+use super::pager::AzdlsPager;
+use super::writer::AzdlsWriter;
 use crate::raw::*;
 use crate::*;
 
@@ -38,7 +38,7 @@ use crate::*;
 /// Azure public cloud: https://accountname.dfs.core.windows.net
 /// Azure US Government: https://accountname.dfs.core.usgovcloudapi.net
 /// Azure China: https://accountname.dfs.core.chinacloudapi.cn
-const KNOWN_AZDFS_ENDPOINT_SUFFIX: &[&str] = &[
+const KNOWN_AZDLS_ENDPOINT_SUFFIX: &[&str] = &[
     "dfs.core.windows.net",
     "dfs.core.usgovcloudapi.net",
     "dfs.core.chinacloudapi.cn",
@@ -47,7 +47,7 @@ const KNOWN_AZDFS_ENDPOINT_SUFFIX: &[&str] = &[
 /// Azure Data Lake Storage Gen2 Support.
 #[doc = include_str!("docs.md")]
 #[derive(Default, Clone)]
-pub struct AzdfsBuilder {
+pub struct AzdlsBuilder {
     root: Option<String>,
     filesystem: String,
     endpoint: Option<String>,
@@ -56,7 +56,7 @@ pub struct AzdfsBuilder {
     http_client: Option<HttpClient>,
 }
 
-impl Debug for AzdfsBuilder {
+impl Debug for AzdlsBuilder {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         let mut ds = f.debug_struct("Builder");
 
@@ -75,7 +75,7 @@ impl Debug for AzdfsBuilder {
     }
 }
 
-impl AzdfsBuilder {
+impl AzdlsBuilder {
     /// Set root of this backend.
     ///
     /// All operations will happen under this root.
@@ -145,9 +145,9 @@ impl AzdfsBuilder {
     }
 }
 
-impl Builder for AzdfsBuilder {
-    type Accessor = AzdfsBackend;
-    const SCHEME: Scheme = Scheme::Azdfs;
+impl Builder for AzdlsBuilder {
+    type Accessor = AzdlsBackend;
+    const SCHEME: Scheme = Scheme::Azdls;
 
     fn build(&mut self) -> Result<Self::Accessor> {
         debug!("backend build started: {:?}", &self);
@@ -160,7 +160,7 @@ impl Builder for AzdfsBuilder {
             false => Ok(&self.filesystem),
             true => Err(Error::new(ErrorKind::ConfigInvalid, "filesystem is 
empty")
                 .with_operation("Builder::build")
-                .with_context("service", Scheme::Azdfs)),
+                .with_context("service", Scheme::Azdls)),
         }?;
         debug!("backend use filesystem {}", &filesystem);
 
@@ -168,7 +168,7 @@ impl Builder for AzdfsBuilder {
             Some(endpoint) => Ok(endpoint.clone()),
             None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is 
empty")
                 .with_operation("Builder::build")
-                .with_context("service", Scheme::Azdfs)),
+                .with_context("service", Scheme::Azdls)),
         }?;
         debug!("backend use endpoint {}", &filesystem);
 
@@ -177,7 +177,7 @@ impl Builder for AzdfsBuilder {
         } else {
             HttpClient::new().map_err(|err| {
                 err.with_operation("Builder::build")
-                    .with_context("service", Scheme::Azdfs)
+                    .with_context("service", Scheme::Azdls)
             })?
         };
 
@@ -195,8 +195,8 @@ impl Builder for AzdfsBuilder {
         let signer = AzureStorageSigner::new();
 
         debug!("backend build finished: {:?}", &self);
-        Ok(AzdfsBackend {
-            core: Arc::new(AzdfsCore {
+        Ok(AzdlsBackend {
+            core: Arc::new(AzdlsCore {
                 filesystem: self.filesystem.clone(),
                 root,
                 endpoint,
@@ -208,7 +208,7 @@ impl Builder for AzdfsBuilder {
     }
 
     fn from_map(map: HashMap<String, String>) -> Self {
-        let mut builder = AzdfsBuilder::default();
+        let mut builder = AzdlsBuilder::default();
 
         map.get("root").map(|v| builder.root(v));
         map.get("filesystem").map(|v| builder.filesystem(v));
@@ -222,22 +222,22 @@ impl Builder for AzdfsBuilder {
 
 /// Backend for azblob services.
 #[derive(Debug, Clone)]
-pub struct AzdfsBackend {
-    core: Arc<AzdfsCore>,
+pub struct AzdlsBackend {
+    core: Arc<AzdlsCore>,
 }
 
 #[async_trait]
-impl Accessor for AzdfsBackend {
+impl Accessor for AzdlsBackend {
     type Reader = IncomingAsyncBody;
     type BlockingReader = ();
-    type Writer = oio::OneShotWriter<AzdfsWriter>;
+    type Writer = oio::OneShotWriter<AzdlsWriter>;
     type BlockingWriter = ();
-    type Pager = AzdfsPager;
+    type Pager = AzdlsPager;
     type BlockingPager = ();
 
     fn info(&self) -> AccessorInfo {
         let mut am = AccessorInfo::default();
-        am.set_scheme(Scheme::Azdfs)
+        am.set_scheme(Scheme::Azdls)
             .set_root(&self.core.root)
             .set_name(&self.core.filesystem)
             .set_native_capability(Capability {
@@ -264,7 +264,7 @@ impl Accessor for AzdfsBackend {
     async fn create_dir(&self, path: &str, _: OpCreateDir) -> 
Result<RpCreateDir> {
         let mut req =
             self.core
-                .azdfs_create_request(path, "directory", None, None, 
AsyncBody::Empty)?;
+                .azdls_create_request(path, "directory", None, None, 
AsyncBody::Empty)?;
 
         self.core.sign(&mut req).await?;
 
@@ -282,7 +282,7 @@ impl Accessor for AzdfsBackend {
     }
 
     async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, 
Self::Reader)> {
-        let resp = self.core.azdfs_read(path, args.range()).await?;
+        let resp = self.core.azdls_read(path, args.range()).await?;
 
         let status = resp.status();
 
@@ -298,12 +298,12 @@ impl Accessor for AzdfsBackend {
     async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, 
Self::Writer)> {
         Ok((
             RpWrite::default(),
-            oio::OneShotWriter::new(AzdfsWriter::new(self.core.clone(), args, 
path.to_string())),
+            oio::OneShotWriter::new(AzdlsWriter::new(self.core.clone(), args, 
path.to_string())),
         ))
     }
 
     async fn rename(&self, from: &str, to: &str, _args: OpRename) -> 
Result<RpRename> {
-        if let Some(resp) = self.core.azdfs_ensure_parent_path(to).await? {
+        if let Some(resp) = self.core.azdls_ensure_parent_path(to).await? {
             let status = resp.status();
             match status {
                 StatusCode::CREATED | StatusCode::CONFLICT => {
@@ -313,7 +313,7 @@ impl Accessor for AzdfsBackend {
             }
         }
 
-        let resp = self.core.azdfs_rename(from, to).await?;
+        let resp = self.core.azdls_rename(from, to).await?;
 
         let status = resp.status();
 
@@ -332,7 +332,7 @@ impl Accessor for AzdfsBackend {
             return Ok(RpStat::new(Metadata::new(EntryMode::DIR)));
         }
 
-        let resp = self.core.azdfs_get_properties(path).await?;
+        let resp = self.core.azdls_get_properties(path).await?;
 
         let status = resp.status();
 
@@ -346,7 +346,7 @@ impl Accessor for AzdfsBackend {
     }
 
     async fn delete(&self, path: &str, _: OpDelete) -> Result<RpDelete> {
-        let resp = self.core.azdfs_delete(path).await?;
+        let resp = self.core.azdls_delete(path).await?;
 
         let status = resp.status();
 
@@ -357,7 +357,7 @@ impl Accessor for AzdfsBackend {
     }
 
     async fn list(&self, path: &str, args: OpList) -> Result<(RpList, 
Self::Pager)> {
-        let op = AzdfsPager::new(self.core.clone(), path.to_string(), 
args.limit());
+        let op = AzdlsPager::new(self.core.clone(), path.to_string(), 
args.limit());
 
         Ok((RpList::default(), op))
     }
@@ -377,7 +377,7 @@ fn infer_storage_name_from_endpoint(endpoint: &str) -> 
Option<String> {
         .trim_end_matches('/')
         .to_lowercase();
 
-    if KNOWN_AZDFS_ENDPOINT_SUFFIX
+    if KNOWN_AZDLS_ENDPOINT_SUFFIX
         .iter()
         .any(|s| *s == endpoint_suffix.as_str())
     {
@@ -389,8 +389,8 @@ fn infer_storage_name_from_endpoint(endpoint: &str) -> 
Option<String> {
 
 #[cfg(test)]
 mod tests {
-    use super::AzdfsBuilder;
-    use crate::services::azdfs::backend::infer_storage_name_from_endpoint;
+    use super::AzdlsBuilder;
+    use crate::services::azdls::backend::infer_storage_name_from_endpoint;
     use crate::Builder;
 
     #[test]
@@ -409,43 +409,43 @@ mod tests {
 
     #[test]
     fn test_builder_from_endpoint_and_key_infer_account_name() {
-        let mut azdfs_builder = AzdfsBuilder::default();
-        
azdfs_builder.endpoint("https://storagesample.dfs.core.chinacloudapi.cn";);
-        azdfs_builder.account_key("account-key");
-        azdfs_builder.filesystem("filesystem");
-        let azdfs = azdfs_builder
+        let mut azdls_builder = AzdlsBuilder::default();
+        
azdls_builder.endpoint("https://storagesample.dfs.core.chinacloudapi.cn";);
+        azdls_builder.account_key("account-key");
+        azdls_builder.filesystem("filesystem");
+        let azdls = azdls_builder
             .build()
-            .expect("build azdfs should be succeeded.");
+            .expect("build azdls should be succeeded.");
 
         assert_eq!(
-            azdfs.core.endpoint,
+            azdls.core.endpoint,
             "https://storagesample.dfs.core.chinacloudapi.cn";
         );
 
-        assert_eq!(azdfs.core.filesystem, "filesystem".to_string());
+        assert_eq!(azdls.core.filesystem, "filesystem".to_string());
 
         assert_eq!(
-            azdfs_builder.account_key.unwrap(),
+            azdls_builder.account_key.unwrap(),
             "account-key".to_string()
         );
     }
 
     #[test]
     fn test_no_key_wont_infer_account_name() {
-        let mut azdfs_builder = AzdfsBuilder::default();
-        azdfs_builder.endpoint("https://storagesample.dfs.core.windows.net";);
-        azdfs_builder.filesystem("filesystem");
-        let azdfs = azdfs_builder
+        let mut azdls_builder = AzdlsBuilder::default();
+        azdls_builder.endpoint("https://storagesample.dfs.core.windows.net";);
+        azdls_builder.filesystem("filesystem");
+        let azdls = azdls_builder
             .build()
-            .expect("build azdfs should be succeeded.");
+            .expect("build azdls should be succeeded.");
 
         assert_eq!(
-            azdfs.core.endpoint,
+            azdls.core.endpoint,
             "https://storagesample.dfs.core.windows.net";
         );
 
-        assert_eq!(azdfs.core.filesystem, "filesystem".to_string());
+        assert_eq!(azdls.core.filesystem, "filesystem".to_string());
 
-        assert_eq!(azdfs_builder.account_key, None);
+        assert_eq!(azdls_builder.account_key, None);
     }
 }
diff --git a/core/src/services/azdfs/core.rs b/core/src/services/azdls/core.rs
similarity index 94%
rename from core/src/services/azdfs/core.rs
rename to core/src/services/azdls/core.rs
index 409216d17..bbffa2cbe 100644
--- a/core/src/services/azdfs/core.rs
+++ b/core/src/services/azdls/core.rs
@@ -37,7 +37,7 @@ use crate::*;
 const X_MS_RENAME_SOURCE: &str = "x-ms-rename-source";
 const X_MS_VERSION: &str = "x-ms-version";
 
-pub struct AzdfsCore {
+pub struct AzdlsCore {
     pub filesystem: String,
     pub root: String,
     pub endpoint: String,
@@ -47,9 +47,9 @@ pub struct AzdfsCore {
     pub signer: AzureStorageSigner,
 }
 
-impl Debug for AzdfsCore {
+impl Debug for AzdlsCore {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        f.debug_struct("AzdfsCore")
+        f.debug_struct("AzdlsCore")
             .field("filesystem", &self.filesystem)
             .field("root", &self.root)
             .field("endpoint", &self.endpoint)
@@ -57,7 +57,7 @@ impl Debug for AzdfsCore {
     }
 }
 
-impl AzdfsCore {
+impl AzdlsCore {
     async fn load_credential(&self) -> Result<AzureStorageCredential> {
         let cred = self
             .loader
@@ -96,8 +96,8 @@ impl AzdfsCore {
     }
 }
 
-impl AzdfsCore {
-    pub async fn azdfs_read(
+impl AzdlsCore {
+    pub async fn azdls_read(
         &self,
         path: &str,
         range: BytesRange,
@@ -138,7 +138,7 @@ impl AzdfsCore {
     /// resource should be one of `file` or `directory`
     ///
     /// ref: 
https://learn.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create
-    pub fn azdfs_create_request(
+    pub fn azdls_create_request(
         &self,
         path: &str,
         resource: &str,
@@ -176,7 +176,7 @@ impl AzdfsCore {
         Ok(req)
     }
 
-    pub async fn azdfs_rename(&self, from: &str, to: &str) -> 
Result<Response<IncomingAsyncBody>> {
+    pub async fn azdls_rename(&self, from: &str, to: &str) -> 
Result<Response<IncomingAsyncBody>> {
         let source = build_abs_path(&self.root, from);
         let target = build_abs_path(&self.root, to);
 
@@ -201,7 +201,7 @@ impl AzdfsCore {
     }
 
     /// ref: 
https://learn.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update
-    pub fn azdfs_update_request(
+    pub fn azdls_update_request(
         &self,
         path: &str,
         size: Option<usize>,
@@ -230,7 +230,7 @@ impl AzdfsCore {
         Ok(req)
     }
 
-    pub async fn azdfs_get_properties(&self, path: &str) -> 
Result<Response<IncomingAsyncBody>> {
+    pub async fn azdls_get_properties(&self, path: &str) -> 
Result<Response<IncomingAsyncBody>> {
         let p = build_abs_path(&self.root, path)
             .trim_end_matches('/')
             .to_string();
@@ -252,7 +252,7 @@ impl AzdfsCore {
         self.client.send(req).await
     }
 
-    pub async fn azdfs_delete(&self, path: &str) -> 
Result<Response<IncomingAsyncBody>> {
+    pub async fn azdls_delete(&self, path: &str) -> 
Result<Response<IncomingAsyncBody>> {
         let p = build_abs_path(&self.root, path)
             .trim_end_matches('/')
             .to_string();
@@ -274,7 +274,7 @@ impl AzdfsCore {
         self.send(req).await
     }
 
-    pub async fn azdfs_list(
+    pub async fn azdls_list(
         &self,
         path: &str,
         continuation: &str,
@@ -307,7 +307,7 @@ impl AzdfsCore {
         self.send(req).await
     }
 
-    pub async fn azdfs_ensure_parent_path(
+    pub async fn azdls_ensure_parent_path(
         &self,
         path: &str,
     ) -> Result<Option<Response<IncomingAsyncBody>>> {
@@ -325,7 +325,7 @@ impl AzdfsCore {
         if !parts.is_empty() {
             let parent_path = parts.join("/");
             let mut req =
-                self.azdfs_create_request(&parent_path, "directory", None, 
None, AsyncBody::Empty)?;
+                self.azdls_create_request(&parent_path, "directory", None, 
None, AsyncBody::Empty)?;
 
             self.sign(&mut req).await?;
 
diff --git a/core/src/services/azdfs/docs.md b/core/src/services/azdls/docs.md
similarity index 75%
rename from core/src/services/azdfs/docs.md
rename to core/src/services/azdls/docs.md
index 23b283d1c..04c72245d 100644
--- a/core/src/services/azdfs/docs.md
+++ b/core/src/services/azdls/docs.md
@@ -1,6 +1,10 @@
-As known as `abfs`, `azdfs` or `azdls`.
+Also known as `abfs`, `azdfs` or `azdls`.
 
-This service will visist the 
[ABFS](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver)
 URI supported by [Azure Data Lake Storage 
Gen2](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction).
+This service will visit the 
[ABFS](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver)
 URI supported by [Azure Data Lake Storage 
Gen2](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction).
+
+## Notes
+
+`azdls` is different from `azfile` service which used to visit [Azure File 
Storage](https://azure.microsoft.com/en-us/services/storage/files/).
 
 ## Capabilities
 
@@ -36,13 +40,13 @@ Refer to public API docs for more information.
 use std::sync::Arc;
 
 use anyhow::Result;
-use opendal::services::Azdfs;
+use opendal::services::Azdls;
 use opendal::Operator;
 
 #[tokio::main]
 async fn main() -> Result<()> {
     // Create azblob backend builder.
-    let mut builder = Azdfs::default();
+    let mut builder = Azdls::default();
     // Set the root for azblob, all operations will happen under this root.
     //
     // NOTE: the root must be absolute path.
diff --git a/core/src/services/azdfs/error.rs b/core/src/services/azdls/error.rs
similarity index 92%
rename from core/src/services/azdfs/error.rs
rename to core/src/services/azdls/error.rs
index ad0cc94e1..70b165398 100644
--- a/core/src/services/azdfs/error.rs
+++ b/core/src/services/azdls/error.rs
@@ -28,10 +28,10 @@ use crate::Error;
 use crate::ErrorKind;
 use crate::Result;
 
-/// AzdfsError is the error returned by azure dfs service.
+/// AzdlsError is the error returned by azure dfs service.
 #[derive(Default, Deserialize)]
 #[serde(default, rename_all = "PascalCase")]
-struct AzdfsError {
+struct AzdlsError {
     code: String,
     message: String,
     query_parameter_name: String,
@@ -39,9 +39,9 @@ struct AzdfsError {
     reason: String,
 }
 
-impl Debug for AzdfsError {
+impl Debug for AzdlsError {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let mut de = f.debug_struct("AzdfsError");
+        let mut de = f.debug_struct("AzdlsError");
         de.field("code", &self.code);
         // replace `\n` to ` ` for better reading.
         de.field("message", &self.message.replace('\n', " "));
@@ -76,7 +76,7 @@ pub async fn parse_error(resp: Response<IncomingAsyncBody>) 
-> Result<Error> {
         _ => (ErrorKind::Unexpected, false),
     };
 
-    let mut message = match de::from_reader::<_, 
AzdfsError>(bs.clone().reader()) {
+    let mut message = match de::from_reader::<_, 
AzdlsError>(bs.clone().reader()) {
         Ok(azblob_err) => format!("{azblob_err:?}"),
         Err(_) => String::from_utf8_lossy(&bs).into_owned(),
     };
@@ -86,7 +86,7 @@ pub async fn parse_error(resp: Response<IncomingAsyncBody>) 
-> Result<Error> {
             if let Ok(code) = v.to_str() {
                 message = format!(
                     "{:?}",
-                    AzdfsError {
+                    AzdlsError {
                         code: code.to_string(),
                         ..Default::default()
                     }
diff --git a/core/src/services/azdfs/mod.rs b/core/src/services/azdls/mod.rs
similarity index 95%
rename from core/src/services/azdfs/mod.rs
rename to core/src/services/azdls/mod.rs
index db015ca95..0207e1f6c 100644
--- a/core/src/services/azdfs/mod.rs
+++ b/core/src/services/azdls/mod.rs
@@ -16,7 +16,7 @@
 // under the License.
 
 mod backend;
-pub use backend::AzdfsBuilder as Azdfs;
+pub use backend::AzdlsBuilder as Azdls;
 
 mod core;
 mod error;
diff --git a/core/src/services/azdfs/pager.rs b/core/src/services/azdls/pager.rs
similarity index 92%
rename from core/src/services/azdfs/pager.rs
rename to core/src/services/azdls/pager.rs
index e3e29a027..19f67d007 100644
--- a/core/src/services/azdfs/pager.rs
+++ b/core/src/services/azdls/pager.rs
@@ -21,13 +21,13 @@ use async_trait::async_trait;
 use serde::Deserialize;
 use serde_json::de;
 
-use super::core::AzdfsCore;
+use super::core::AzdlsCore;
 use super::error::parse_error;
 use crate::raw::*;
 use crate::*;
 
-pub struct AzdfsPager {
-    core: Arc<AzdfsCore>,
+pub struct AzdlsPager {
+    core: Arc<AzdlsCore>,
 
     path: String,
     limit: Option<usize>,
@@ -36,8 +36,8 @@ pub struct AzdfsPager {
     done: bool,
 }
 
-impl AzdfsPager {
-    pub fn new(core: Arc<AzdfsCore>, path: String, limit: Option<usize>) -> 
Self {
+impl AzdlsPager {
+    pub fn new(core: Arc<AzdlsCore>, path: String, limit: Option<usize>) -> 
Self {
         Self {
             core,
             path,
@@ -50,7 +50,7 @@ impl AzdfsPager {
 }
 
 #[async_trait]
-impl oio::Page for AzdfsPager {
+impl oio::Page for AzdlsPager {
     async fn next(&mut self) -> Result<Option<Vec<oio::Entry>>> {
         if self.done {
             return Ok(None);
@@ -58,10 +58,10 @@ impl oio::Page for AzdfsPager {
 
         let resp = self
             .core
-            .azdfs_list(&self.path, &self.continuation, self.limit)
+            .azdls_list(&self.path, &self.continuation, self.limit)
             .await?;
 
-        // Azdfs will return not found for not-exist path.
+        // Azdls will return not found for not-exist path.
         if resp.status() == http::StatusCode::NOT_FOUND {
             resp.into_body().consume().await?;
             return Ok(None);
@@ -91,7 +91,7 @@ impl oio::Page for AzdfsPager {
         let mut entries = Vec::with_capacity(output.paths.len());
 
         for object in output.paths {
-            // Azdfs will return `"true"` and `"false"` for is_directory.
+            // Azdls will return `"true"` and `"false"` for is_directory.
             let mode = if &object.is_directory == "true" {
                 EntryMode::DIR
             } else {
@@ -139,7 +139,7 @@ struct Path {
     content_length: String,
     #[serde(rename = "etag")]
     etag: String,
-    /// Azdfs will return `"true"` and `"false"` for is_directory.
+    /// Azdls will return `"true"` and `"false"` for is_directory.
     #[serde(rename = "isDirectory")]
     is_directory: String,
     #[serde(rename = "lastModified")]
diff --git a/core/src/services/azdfs/writer.rs 
b/core/src/services/azdls/writer.rs
similarity index 82%
rename from core/src/services/azdfs/writer.rs
rename to core/src/services/azdls/writer.rs
index 0424fe494..2437a1856 100644
--- a/core/src/services/azdfs/writer.rs
+++ b/core/src/services/azdls/writer.rs
@@ -20,29 +20,29 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use http::StatusCode;
 
-use super::core::AzdfsCore;
+use super::core::AzdlsCore;
 use super::error::parse_error;
 use crate::raw::oio::WriteBuf;
 use crate::raw::*;
 use crate::*;
 
-pub struct AzdfsWriter {
-    core: Arc<AzdfsCore>,
+pub struct AzdlsWriter {
+    core: Arc<AzdlsCore>,
 
     op: OpWrite,
     path: String,
 }
 
-impl AzdfsWriter {
-    pub fn new(core: Arc<AzdfsCore>, op: OpWrite, path: String) -> Self {
-        AzdfsWriter { core, op, path }
+impl AzdlsWriter {
+    pub fn new(core: Arc<AzdlsCore>, op: OpWrite, path: String) -> Self {
+        AzdlsWriter { core, op, path }
     }
 }
 
 #[async_trait]
-impl oio::OneShotWrite for AzdfsWriter {
+impl oio::OneShotWrite for AzdlsWriter {
     async fn write_once(&self, bs: &dyn WriteBuf) -> Result<()> {
-        let mut req = self.core.azdfs_create_request(
+        let mut req = self.core.azdls_create_request(
             &self.path,
             "file",
             self.op.content_type(),
@@ -62,12 +62,12 @@ impl oio::OneShotWrite for AzdfsWriter {
             _ => {
                 return Err(parse_error(resp)
                     .await?
-                    .with_operation("Backend::azdfs_create_request"));
+                    .with_operation("Backend::azdls_create_request"));
             }
         }
 
         let bs = 
oio::ChunkedBytes::from_vec(bs.vectored_bytes(bs.remaining()));
-        let mut req = self.core.azdfs_update_request(
+        let mut req = self.core.azdls_update_request(
             &self.path,
             Some(bs.len()),
             AsyncBody::ChunkedBytes(bs),
@@ -85,7 +85,7 @@ impl oio::OneShotWrite for AzdfsWriter {
             }
             _ => Err(parse_error(resp)
                 .await?
-                .with_operation("Backend::azdfs_update_request")),
+                .with_operation("Backend::azdls_update_request")),
         }
     }
 }
diff --git a/core/src/services/mod.rs b/core/src/services/mod.rs
index e3236ef59..ea5aadc12 100644
--- a/core/src/services/mod.rs
+++ b/core/src/services/mod.rs
@@ -24,10 +24,10 @@ mod azblob;
 #[cfg(feature = "services-azblob")]
 pub use azblob::Azblob;
 
-#[cfg(feature = "services-azdfs")]
-mod azdfs;
-#[cfg(feature = "services-azdfs")]
-pub use azdfs::Azdfs;
+#[cfg(feature = "services-azdls")]
+mod azdls;
+#[cfg(feature = "services-azdls")]
+pub use azdls::Azdls;
 
 #[cfg(feature = "services-cos")]
 mod cos;
diff --git a/core/src/types/operator/builder.rs 
b/core/src/types/operator/builder.rs
index e21391351..a87daf8a8 100644
--- a/core/src/types/operator/builder.rs
+++ b/core/src/types/operator/builder.rs
@@ -155,8 +155,8 @@ impl Operator {
         let op = match scheme {
             #[cfg(feature = "services-azblob")]
             Scheme::Azblob => 
Self::from_map::<services::Azblob>(map)?.finish(),
-            #[cfg(feature = "services-azdfs")]
-            Scheme::Azdfs => Self::from_map::<services::Azdfs>(map)?.finish(),
+            #[cfg(feature = "services-azdls")]
+            Scheme::Azdls => Self::from_map::<services::Azdls>(map)?.finish(),
             #[cfg(feature = "services-cacache")]
             Scheme::Cacache => 
Self::from_map::<services::Cacache>(map)?.finish(),
             #[cfg(feature = "services-cos")]
diff --git a/core/src/types/scheme.rs b/core/src/types/scheme.rs
index 4fb0303e8..e32ef5e60 100644
--- a/core/src/types/scheme.rs
+++ b/core/src/types/scheme.rs
@@ -33,8 +33,8 @@ use crate::Error;
 pub enum Scheme {
     /// [azblob][crate::services::Azblob]: Azure Storage Blob services.
     Azblob,
-    /// [azdfs][crate::services::Azdfs]: Azure Data Lake Storage Gen2.
-    Azdfs,
+    /// [azdls][crate::services::Azdls]: Azure Data Lake Storage Gen2.
+    Azdls,
     /// [cacache][crate::services::Cacache]: cacache backend support.
     Cacache,
     /// [cos][crate::services::Cos]: Tencent Cloud Object Storage services.
@@ -143,7 +143,11 @@ impl FromStr for Scheme {
         let s = s.to_lowercase();
         match s.as_str() {
             "azblob" => Ok(Scheme::Azblob),
-            "azdfs" => Ok(Scheme::Azdfs),
+            // Notes:
+            //
+            // OpenDAL used to call `azdls` as `azdfs`, we keep it for 
backward compatibility.
+            // And abfs is widely used in hadoop ecosystem, keep it for easy 
to use.
+            "azdls" | "azdfs" | "abfs" => Ok(Scheme::Azdls),
             "cacache" => Ok(Scheme::Cacache),
             "cos" => Ok(Scheme::Cos),
             "dashmap" => Ok(Scheme::Dashmap),
@@ -188,7 +192,7 @@ impl From<Scheme> for &'static str {
     fn from(v: Scheme) -> Self {
         match v {
             Scheme::Azblob => "azblob",
-            Scheme::Azdfs => "azdfs",
+            Scheme::Azdls => "azdls",
             Scheme::Cacache => "cacache",
             Scheme::Cos => "cos",
             Scheme::Dashmap => "dashmap",
diff --git a/core/tests/behavior/main.rs b/core/tests/behavior/main.rs
index facbd34a0..bc7678407 100644
--- a/core/tests/behavior/main.rs
+++ b/core/tests/behavior/main.rs
@@ -99,8 +99,8 @@ fn main() -> anyhow::Result<()> {
 
     #[cfg(feature = "services-azblob")]
     tests.extend(behavior_test::<services::Azblob>());
-    #[cfg(feature = "services-azdfs")]
-    tests.extend(behavior_test::<services::Azdfs>());
+    #[cfg(feature = "services-azdls")]
+    tests.extend(behavior_test::<services::Azdls>());
     #[cfg(feature = "services-cacache")]
     tests.extend(behavior_test::<services::Cacache>());
     #[cfg(feature = "services-cos")]
diff --git a/website/blog/2023-07-07-apache-opendal-access-data-freely/index.md 
b/website/blog/2023-07-07-apache-opendal-access-data-freely/index.md
index 22fac84e7..c98c3904c 100644
--- a/website/blog/2023-07-07-apache-opendal-access-data-freely/index.md
+++ b/website/blog/2023-07-07-apache-opendal-access-data-freely/index.md
@@ -53,7 +53,7 @@ OpenDAL supports dozens of storage services, covering a wide 
range of scenarios
 
 - Standard Storage Protocols: FTP, HTTP, SFTP, WebDAV, etc.
 - Object Storage Services: azblob, gcs, obs, oss, s3, etc.
-- File Storage Services: fs, azdfs, hdfs, webhdfs, ipfs, etc.
+- File Storage Services: fs, azdls, hdfs, webhdfs, ipfs, etc.
 - Consumer Cloud Storage Service: Google Drive, OneDrive, Dropbox, etc.
 - Key-Value Storage Service: Memory, Redis, Rocksdb, etc.
 - Cache Storage Service: Ghac, Memcached, etc.
diff --git a/website/docs/services/azdfs.mdx b/website/docs/services/azdls.mdx
similarity index 87%
rename from website/docs/services/azdfs.mdx
rename to website/docs/services/azdls.mdx
index b87547147..92555b2f6 100644
--- a/website/docs/services/azdfs.mdx
+++ b/website/docs/services/azdls.mdx
@@ -1,10 +1,10 @@
 ---
-title: Azdfs
+title: Azdls
 ---
 
 Azure Data Lake Storage Gen2 Support.
 
-import Docs from '../../../core/src/services/azdfs/docs.md'
+import Docs from '../../../core/src/services/azdls/docs.md'
 
 <Docs components={props.components} />
 
@@ -32,7 +32,7 @@ async fn main() -> Result<()> {
     map.insert("account_name".to_string(), "account_name".to_string());
     map.insert("account_key".to_string(), "account_key".to_string());
 
-    let op: Operator = Operator::via_map(Scheme::Azdfs, map)?;
+    let op: Operator = Operator::via_map(Scheme::Azdls, map)?;
     Ok(())
 }
 ```
@@ -44,7 +44,7 @@ async fn main() -> Result<()> {
 import { Operator } from "opendal";
 
 async function main() {
-  const op = new Operator("azdfs", {
+  const op = new Operator("azdls", {
     root: "/path/to/dir",
     filesystem: "test",
    endpoint: "https://accountname.dfs.core.windows.net",
@@ -60,7 +60,7 @@ async function main() {
 ```python
 import opendal
 
-op = opendal.Operator("azdfs",
+op = opendal.Operator("azdls",
     root="/path/to/dir",
     filesystem="test",
    endpoint="https://accountname.dfs.core.windows.net",
diff --git a/website/src/components/HomepageFeatures/_feature_services.mdx 
b/website/src/components/HomepageFeatures/_feature_services.mdx
index 545f128f6..e016b228e 100644
--- a/website/src/components/HomepageFeatures/_feature_services.mdx
+++ b/website/src/components/HomepageFeatures/_feature_services.mdx
@@ -27,7 +27,7 @@ Apache OpenDAL provides native support for all kinds for 
storage systems.
 <summary>File Storage Services</summary>
 
 - fs: POSIX alike file system
-- azdfs: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
+- azdls: [Azure Data Lake Storage 
Gen2](https://azure.microsoft.com/en-us/products/storage/data-lake-storage/) 
services (As known as 
[abfs](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-abfs-driver))
 - hdfs: [Hadoop Distributed File 
System](https://hadoop.apache.org/docs/r3.3.4/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)(HDFS)
 - ipfs: [InterPlanetary File System](https://ipfs.tech/) HTTP Gateway
 - ipmfs: [InterPlanetary File System](https://ipfs.tech/) MFS API *being 
worked on*


Reply via email to