This is an automated email from the ASF dual-hosted git repository.

tison pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/opendal.git


The following commit(s) were added to refs/heads/main by this push:
     new 67cd00d74 refactor(foyer): seperate mods from lib.rs (#7154)
67cd00d74 is described below

commit 67cd00d744e8a6a64b9729f49bdaa1f4de215885
Author: flaneur <[email protected]>
AuthorDate: Wed Jan 21 23:31:03 2026 +0800

    refactor(foyer): seperate mods from lib.rs (#7154)
---
 core/layers/foyer/src/deleter.rs |  60 +++++++++
 core/layers/foyer/src/error.rs   |  41 ++++++
 core/layers/foyer/src/full.rs    | 136 ++++++++++++++++++++
 core/layers/foyer/src/lib.rs     | 217 ++++----------------------------
 core/layers/foyer/src/writer.rs  |  88 +++++++++++++
 dev/Cargo.lock                   | 260 +++++++++++++++++++++++++++++++++++----
 6 files changed, 587 insertions(+), 215 deletions(-)

diff --git a/core/layers/foyer/src/deleter.rs b/core/layers/foyer/src/deleter.rs
new file mode 100644
index 000000000..1de6b662e
--- /dev/null
+++ b/core/layers/foyer/src/deleter.rs
@@ -0,0 +1,60 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use opendal_core::Result;
+use opendal_core::raw::Access;
+use opendal_core::raw::OpDelete;
+use opendal_core::raw::oio;
+
+use crate::FoyerKey;
+use crate::Inner;
+
+pub struct Deleter<A: Access> {
+    pub(crate) deleter: A::Deleter,
+    pub(crate) keys: Vec<FoyerKey>,
+    pub(crate) inner: Arc<Inner<A>>,
+}
+
+impl<A: Access> Deleter<A> {
+    pub(crate) fn new(deleter: A::Deleter, inner: Arc<Inner<A>>) -> Self {
+        Self {
+            deleter,
+            keys: vec![],
+            inner,
+        }
+    }
+}
+
+impl<A: Access> oio::Delete for Deleter<A> {
+    async fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        self.deleter.delete(path, args.clone()).await?;
+        self.keys.push(FoyerKey {
+            path: path.to_string(),
+            version: args.version().map(|v| v.to_string()),
+        });
+        Ok(())
+    }
+
+    async fn close(&mut self) -> Result<()> {
+        for key in &self.keys {
+            self.inner.cache.remove(key);
+        }
+        self.deleter.close().await
+    }
+}
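
The new Deleter above forwards each delete to the underlying accessor, records the affected key, and only evicts the recorded keys from the foyer cache when the batch is closed. Below is a minimal standalone sketch of that invalidate-on-close pattern; MockCache and MockDeleter are illustrative stand-ins (a plain HashMap instead of foyer's HybridCache), not OpenDAL or foyer types.

    // Invalidate-on-close: remember keys while deleting, evict them in one pass at close().
    use std::collections::HashMap;

    struct MockCache {
        entries: HashMap<String, Vec<u8>>,
    }

    struct MockDeleter<'a> {
        cache: &'a mut MockCache,
        pending: Vec<String>,
    }

    impl<'a> MockDeleter<'a> {
        // The real Deleter forwards the delete to the underlying accessor first,
        // then remembers the key; here we only model the bookkeeping.
        fn delete(&mut self, path: &str) {
            self.pending.push(path.to_string());
        }

        // On close, drop every recorded key from the cache, mirroring
        // `self.inner.cache.remove(key)` in the patch above.
        fn close(self) {
            for key in self.pending {
                self.cache.entries.remove(&key);
            }
        }
    }

    fn main() {
        let mut cache = MockCache {
            entries: HashMap::from([("obj-1".to_string(), vec![1u8, 2, 3])]),
        };
        let mut deleter = MockDeleter { cache: &mut cache, pending: vec![] };
        deleter.delete("obj-1");
        deleter.close();
        assert!(cache.entries.is_empty());
    }
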
diff --git a/core/layers/foyer/src/error.rs b/core/layers/foyer/src/error.rs
new file mode 100644
index 000000000..83c3f8b4e
--- /dev/null
+++ b/core/layers/foyer/src/error.rs
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use foyer::Error as FoyerError;
+
+use opendal_core::Error;
+use opendal_core::ErrorKind;
+
+/// Custom error type for when fetched data exceeds size limit.
+#[derive(Debug)]
+pub(crate) struct FetchSizeTooLarge;
+
+impl std::fmt::Display for FetchSizeTooLarge {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "fetched data size exceeds size limit")
+    }
+}
+
+impl std::error::Error for FetchSizeTooLarge {}
+
+pub(crate) fn extract_err(e: FoyerError) -> Error {
+    let e = match e.downcast::<Error>() {
+        Ok(e) => return e,
+        Err(e) => e,
+    };
+    Error::new(ErrorKind::Unexpected, e.to_string())
+}
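
extract_err tries to downcast the foyer error back into the original opendal Error, and only when that fails does it wrap the message into ErrorKind::Unexpected. A rough standalone illustration of the same downcast-or-wrap idea, using Box<dyn std::error::Error> and a made-up AppError in place of foyer::Error and opendal_core::Error (assumption: foyer's Error::downcast behaves analogously):

    use std::error::Error as StdError;
    use std::fmt;

    #[derive(Debug)]
    struct AppError(String);

    impl fmt::Display for AppError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "app error: {}", self.0)
        }
    }

    impl StdError for AppError {}

    // If the boxed error really is an AppError, recover it as-is; otherwise
    // wrap its message, like extract_err falls back to ErrorKind::Unexpected.
    fn extract(e: Box<dyn StdError + Send + Sync>) -> AppError {
        match e.downcast::<AppError>() {
            Ok(e) => *e,
            Err(e) => AppError(e.to_string()),
        }
    }

    fn main() {
        let original: Box<dyn StdError + Send + Sync> = Box::new(AppError("not found".into()));
        assert_eq!(extract(original).0, "not found");

        let foreign: Box<dyn StdError + Send + Sync> =
            Box::new(std::io::Error::new(std::io::ErrorKind::Other, "io failed"));
        assert!(extract(foreign).0.contains("io failed"));
    }
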
diff --git a/core/layers/foyer/src/full.rs b/core/layers/foyer/src/full.rs
new file mode 100644
index 000000000..95772d012
--- /dev/null
+++ b/core/layers/foyer/src/full.rs
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use foyer::Error as FoyerError;
+
+use opendal_core::Buffer;
+use opendal_core::Result;
+use opendal_core::raw::Access;
+use opendal_core::raw::BytesContentRange;
+use opendal_core::raw::BytesRange;
+use opendal_core::raw::OpRead;
+use opendal_core::raw::OpStat;
+use opendal_core::raw::RpRead;
+use opendal_core::raw::oio::Read;
+
+use crate::FoyerKey;
+use crate::FoyerValue;
+use crate::Inner;
+use crate::error::{FetchSizeTooLarge, extract_err};
+
+pub struct FullReader<A: Access> {
+    inner: Arc<Inner<A>>,
+    size_limit: std::ops::Range<usize>,
+}
+
+impl<A: Access> FullReader<A> {
+    pub fn new(inner: Arc<Inner<A>>, size_limit: std::ops::Range<usize>) -> Self {
+        Self { inner, size_limit }
+    }
+
+    /// Read data from cache or underlying storage.
+    /// Caches the ENTIRE object, then slices to requested range.
+    pub async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Buffer)> {
+        let path_str = path.to_string();
+        let version = args.version().map(|v| v.to_string());
+        let original_args = args.clone();
+
+        // Extract range bounds before async block to avoid lifetime issues
+        let (range_start, range_end) = {
+            let r = args.range();
+            let start = r.offset();
+            let end = r.size().map(|size| start + size);
+            (start, end)
+        };
+
+        // Use fetch to read data from cache or fallback to remote. fetch() can automatically
+        // handle the thundering herd problem by ensuring only one request is made for a given
+        // key.
+        //
+        // Please note that we only cache the object if it's smaller than size_limit. And we'll
+        // fetch the ENTIRE object from remote to put it into cache, then slice it to the requested
+        // range.
+        let result = self
+            .inner
+            .cache
+            .fetch(
+                FoyerKey {
+                    path: path_str.clone(),
+                    version: version.clone(),
+                },
+                || {
+                    let inner = self.inner.clone();
+                    let size_limit = self.size_limit.clone();
+                    let path_clone = path_str.clone();
+                    async move {
+                        // read the metadata first, if it's too large, do not cache
+                        let metadata = inner
+                            .accessor
+                            .stat(&path_clone, OpStat::default())
+                            .await
+                            .map_err(FoyerError::other)?
+                            .into_metadata();
+
+                        let size = metadata.content_length() as usize;
+                        if !size_limit.contains(&size) {
+                            return Err(FoyerError::other(FetchSizeTooLarge));
+                        }
+
+                        // fetch the ENTIRE object from remote.
+                        let (_, mut reader) = inner
+                            .accessor
+                            .read(
+                                &path_clone,
+                                OpRead::default().with_range(BytesRange::new(0, None)),
+                            )
+                            .await
+                            .map_err(FoyerError::other)?;
+                        let buffer = reader.read_all().await.map_err(FoyerError::other)?;
+
+                        Ok(FoyerValue(buffer))
+                    }
+                },
+            )
+            .await;
+
+        // If got entry from cache, slice it to the requested range. If it's larger than size_limit,
+        // we'll simply forward the request to the underlying accessor with user's given range.
+        match result {
+            Ok(entry) => {
+                let end = range_end.unwrap_or(entry.len() as u64);
+                let range = BytesContentRange::default()
+                    .with_range(range_start, end - 1)
+                    .with_size(entry.len() as _);
+                let buffer = entry.slice(range_start as usize..end as usize);
+                let rp = RpRead::new()
+                    .with_size(Some(buffer.len() as _))
+                    .with_range(Some(range));
+                Ok((rp, buffer))
+            }
+            Err(e) => match e.downcast::<FetchSizeTooLarge>() {
+                Ok(_) => {
+                    let (rp, mut reader) = self.inner.accessor.read(path, original_args).await?;
+                    let buffer = reader.read_all().await?;
+                    Ok((rp, buffer))
+                }
+                Err(e) => Err(extract_err(e)),
+            },
+        }
+    }
+}
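
After a cache hit, FullReader already holds the entire object in memory and only slices out the caller's range; when the caller gave no size, the end defaults to the object length. A small self-contained sketch of that slice arithmetic, with plain byte slices standing in for Buffer and BytesContentRange (the slice_cached helper is illustrative only):

    // Slice the cached whole object down to [offset, offset + size), where a
    // missing size means "to the end", matching `range_end.unwrap_or(entry.len())`.
    fn slice_cached(object: &[u8], offset: u64, size: Option<u64>) -> &[u8] {
        let start = offset as usize;
        let end = size.map(|s| start + s as usize).unwrap_or(object.len());
        &object[start..end]
    }

    fn main() {
        let object = b"hello world";
        assert_eq!(slice_cached(object, 0, None), &b"hello world"[..]);
        assert_eq!(slice_cached(object, 6, Some(5)), &b"world"[..]);
        // For the second call the reported content range covers bytes 6-10 of an
        // 11-byte object, i.e. start..=end-1 as in `.with_range(range_start, end - 1)`.
    }
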
diff --git a/core/layers/foyer/src/lib.rs b/core/layers/foyer/src/lib.rs
index 1242204a8..06b9c389e 100644
--- a/core/layers/foyer/src/lib.rs
+++ b/core/layers/foyer/src/lib.rs
@@ -15,37 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.
 
+mod deleter;
+mod error;
+mod full;
+mod writer;
+
 use std::{
     future::Future,
     ops::{Bound, Deref, Range, RangeBounds},
     sync::Arc,
 };
 
-use foyer::{Code, CodeError, Error as FoyerError, HybridCache};
+use foyer::{Code, CodeError, HybridCache};
 
-use opendal_core::raw::oio::*;
 use opendal_core::raw::*;
 use opendal_core::*;
 
-/// Custom error type for when fetched data exceeds size limit.
-#[derive(Debug)]
-struct FetchSizeTooLarge;
-
-impl std::fmt::Display for FetchSizeTooLarge {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "fetched data size exceeds size limit")
-    }
-}
-
-impl std::error::Error for FetchSizeTooLarge {}
-
-fn extract_err(e: FoyerError) -> Error {
-    let e = match e.downcast::<Error>() {
-        Ok(e) => return e,
-        Err(e) => e,
-    };
-    Error::new(ErrorKind::Unexpected, e.to_string())
-}
+pub use deleter::Deleter;
+pub use writer::Writer;
 
 /// [`FoyerKey`] is a key for the foyer cache. It's encoded via bincode, which is
 /// backed by foyer's "serde" feature.
@@ -181,10 +168,10 @@ impl<A: Access> Layer<A> for FoyerLayer {
 }
 
 #[derive(Debug)]
-struct Inner<A: Access> {
-    accessor: A,
-    cache: HybridCache<FoyerKey, FoyerValue>,
-    size_limit: Range<usize>,
+pub(crate) struct Inner<A: Access> {
+    pub(crate) accessor: A,
+    pub(crate) cache: HybridCache<FoyerKey, FoyerValue>,
+    pub(crate) size_limit: Range<usize>,
 }
 
 #[derive(Debug)]
@@ -208,90 +195,9 @@ impl<A: Access> LayeredAccess for FoyerAccessor<A> {
     }
 
     async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
-        let path_str = path.to_string();
-        let version = args.version().map(|v| v.to_string());
-        let original_args = args.clone();
-
-        // Extract range bounds before async block to avoid lifetime issues
-        let (range_start, range_end) = {
-            let r = args.range();
-            let start = r.offset();
-            let end = r.size().map(|size| start + size);
-            (start, end)
-        };
-
-        // Use fetch to read data from cache or fallback to remote. fetch() can automatically
-        // handle the thundering herd problem by ensuring only one request is made for a given
-        // key.
-        //
-        // Please note that we only cache the object if it's smaller than size_limit. And we'll
-        // fetch the ENTIRE object from remote to put it into cache, then slice it to the requested
-        // range.
-        let result = self
-            .inner
-            .cache
-            .fetch(
-                FoyerKey {
-                    path: path_str.clone(),
-                    version: version.clone(),
-                },
-                || {
-                    let inner = self.inner.clone();
-                    let path_clone = path_str.clone();
-                    async move {
-                        // read the metadata first, if it's too large, do not cache
-                        let metadata = inner
-                            .accessor
-                            .stat(&path_clone, OpStat::default())
-                            .await
-                            .map_err(FoyerError::other)?
-                            .into_metadata();
-
-                        let size = metadata.content_length() as usize;
-                        if !inner.size_limit.contains(&size) {
-                            return Err(FoyerError::other(FetchSizeTooLarge));
-                        }
-
-                        // fetch the ENTIRE object from remote.
-                        let (_, mut reader) = inner
-                            .accessor
-                            .read(
-                                &path_clone,
-                                OpRead::default().with_range(BytesRange::new(0, None)),
-                            )
-                            .await
-                            .map_err(FoyerError::other)?;
-                        let buffer = reader.read_all().await.map_err(FoyerError::other)?;
-
-                        Ok(FoyerValue(buffer))
-                    }
-                },
-            )
-            .await;
-
-        // If got entry from cache, slice it to the requested range. If it's larger than size_limit,
-        // we'll simply forward the request to the underlying accessor with user's given range.
-        match result {
-            Ok(entry) => {
-                let end = range_end.unwrap_or(entry.len() as u64);
-                let range = BytesContentRange::default()
-                    .with_range(range_start, end - 1)
-                    .with_size(entry.len() as _);
-                let buffer = entry.slice(range_start as usize..end as usize);
-                let rp = RpRead::new()
-                    .with_size(Some(buffer.len() as _))
-                    .with_range(Some(range));
-                Ok((rp, buffer))
-            }
-            Err(e) => match e.downcast::<FetchSizeTooLarge>() {
-                Ok(_) => {
-                    let (rp, mut reader) = self.inner.accessor.read(path, original_args).await?;
-                    let buffer = reader.read_all().await?;
-                    Ok((rp, buffer))
-                }
-                Err(e) => Err(extract_err(e)),
-            },
-        }
+        full::FullReader::new(self.inner.clone(), self.inner.size_limit.clone())
+            .read(path, args)
+            .await
     }
 
     fn write(
@@ -300,18 +206,11 @@ impl<A: Access> LayeredAccess for FoyerAccessor<A> {
         args: OpWrite,
     ) -> impl Future<Output = Result<(RpWrite, Self::Writer)>> + MaybeSend {
         let inner = self.inner.clone();
+        let size_limit = self.inner.size_limit.clone();
+        let path = path.to_string();
         async move {
-            let (rp, w) = self.inner.accessor.write(path, args).await?;
-            Ok((
-                rp,
-                Writer {
-                    w,
-                    buf: QueueBuf::new(),
-                    path: path.to_string(),
-                    inner,
-                    skip_cache: false,
-                },
-            ))
+            let (rp, w) = inner.accessor.write(&path, args).await?;
+            Ok((rp, Writer::new(w, path, inner, size_limit)))
         }
     }
 
@@ -319,14 +218,7 @@ impl<A: Access> LayeredAccess for FoyerAccessor<A> {
         let inner = self.inner.clone();
         async move {
             let (rp, d) = inner.accessor.delete().await?;
-            Ok((
-                rp,
-                Deleter {
-                    deleter: d,
-                    keys: vec![],
-                    inner,
-                },
-            ))
+            Ok((rp, Deleter::new(d, inner)))
         }
     }
 
@@ -337,81 +229,18 @@ impl<A: Access> LayeredAccess for FoyerAccessor<A> {
     // TODO(MrCroxx): Implement copy, rename with foyer cache.
 }
 
-pub struct Writer<A: Access> {
-    w: A::Writer,
-    buf: QueueBuf,
-    path: String,
-    inner: Arc<Inner<A>>,
-    skip_cache: bool,
-}
-
-impl<A: Access> oio::Write for Writer<A> {
-    async fn write(&mut self, bs: Buffer) -> Result<()> {
-        if self.inner.size_limit.contains(&(self.buf.len() + bs.len())) {
-            self.buf.push(bs.clone());
-            self.skip_cache = false;
-        } else {
-            self.buf.clear();
-            self.skip_cache = true;
-        }
-        self.w.write(bs).await
-    }
-
-    async fn close(&mut self) -> Result<Metadata> {
-        let buffer = self.buf.clone().collect();
-        let metadata = self.w.close().await?;
-        if !self.skip_cache {
-            self.inner.cache.insert(
-                FoyerKey {
-                    path: self.path.clone(),
-                    version: metadata.version().map(|v| v.to_string()),
-                },
-                FoyerValue(buffer),
-            );
-        }
-        Ok(metadata)
-    }
-
-    async fn abort(&mut self) -> Result<()> {
-        self.buf.clear();
-        self.w.abort().await
-    }
-}
-
-pub struct Deleter<A: Access> {
-    deleter: A::Deleter,
-    keys: Vec<FoyerKey>,
-    inner: Arc<Inner<A>>,
-}
-
-impl<A: Access> oio::Delete for Deleter<A> {
-    async fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
-        self.deleter.delete(path, args.clone()).await?;
-        self.keys.push(FoyerKey {
-            path: path.to_string(),
-            version: args.version().map(|v| v.to_string()),
-        });
-        Ok(())
-    }
-
-    async fn close(&mut self) -> Result<()> {
-        for key in &self.keys {
-            self.inner.cache.remove(key);
-        }
-        self.deleter.close().await
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use foyer::{
-        DirectFsDeviceOptions, Engine, HybridCacheBuilder, LargeEngineOptions, RecoverMode,
+        DirectFsDeviceOptions, Engine, Error as FoyerError, HybridCacheBuilder, LargeEngineOptions,
+        RecoverMode,
     };
     use opendal_core::{Operator, services::Memory};
     use size::consts::MiB;
     use std::io::Cursor;
 
     use super::*;
+    use crate::error::extract_err;
 
     fn key(i: u8) -> String {
         format!("obj-{i}")
diff --git a/core/layers/foyer/src/writer.rs b/core/layers/foyer/src/writer.rs
new file mode 100644
index 000000000..fd505f174
--- /dev/null
+++ b/core/layers/foyer/src/writer.rs
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use opendal_core::Buffer;
+use opendal_core::Metadata;
+use opendal_core::Result;
+use opendal_core::raw::Access;
+use opendal_core::raw::oio;
+
+use crate::FoyerKey;
+use crate::FoyerValue;
+use crate::Inner;
+
+pub struct Writer<A: Access> {
+    pub(crate) w: A::Writer,
+    pub(crate) buf: oio::QueueBuf,
+    pub(crate) path: String,
+    pub(crate) inner: Arc<Inner<A>>,
+    pub(crate) size_limit: std::ops::Range<usize>,
+    pub(crate) skip_cache: bool,
+}
+
+impl<A: Access> Writer<A> {
+    pub(crate) fn new(
+        w: A::Writer,
+        path: String,
+        inner: Arc<Inner<A>>,
+        size_limit: std::ops::Range<usize>,
+    ) -> Self {
+        Self {
+            w,
+            buf: oio::QueueBuf::new(),
+            path,
+            inner,
+            size_limit,
+            skip_cache: false,
+        }
+    }
+}
+
+impl<A: Access> oio::Write for Writer<A> {
+    async fn write(&mut self, bs: Buffer) -> Result<()> {
+        if self.size_limit.contains(&(self.buf.len() + bs.len())) {
+            self.buf.push(bs.clone());
+            self.skip_cache = false;
+        } else {
+            self.buf.clear();
+            self.skip_cache = true;
+        }
+        self.w.write(bs).await
+    }
+
+    async fn close(&mut self) -> Result<Metadata> {
+        let buffer = self.buf.clone().collect();
+        let metadata = self.w.close().await?;
+        if !self.skip_cache {
+            self.inner.cache.insert(
+                FoyerKey {
+                    path: self.path.clone(),
+                    version: metadata.version().map(|v| v.to_string()),
+                },
+                FoyerValue(buffer),
+            );
+        }
+        Ok(metadata)
+    }
+
+    async fn abort(&mut self) -> Result<()> {
+        self.buf.clear();
+        self.w.abort().await
+    }
+}
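
On each write the Writer keeps buffering for the cache only while the running total stays inside size_limit; otherwise it clears the buffer and marks the object as not cacheable for now, while the bytes themselves are still forwarded to the underlying writer. A standalone sketch of that buffer-or-skip decision, with an illustrative SketchWriter and made-up chunk sizes:

    struct SketchWriter {
        buf: Vec<u8>,
        size_limit: std::ops::Range<usize>,
        skip_cache: bool,
    }

    impl SketchWriter {
        fn write(&mut self, bs: &[u8]) {
            // Same branch as the patch: accumulate while the total fits the
            // limit, otherwise drop the buffer and skip caching this object.
            if self.size_limit.contains(&(self.buf.len() + bs.len())) {
                self.buf.extend_from_slice(bs);
                self.skip_cache = false;
            } else {
                self.buf.clear();
                self.skip_cache = true;
            }
            // The real Writer always forwards `bs` to the underlying writer here.
        }
    }

    fn main() {
        let mut w = SketchWriter { buf: vec![], size_limit: 0..8, skip_cache: false };
        w.write(&[0u8; 4]);
        assert!(!w.skip_cache); // 4 bytes fits within 0..8
        w.write(&[0u8; 8]);
        assert!(w.skip_cache);  // 4 + 8 = 12 exceeds the limit; buffer dropped
        assert!(w.buf.is_empty());
    }
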
diff --git a/dev/Cargo.lock b/dev/Cargo.lock
index b776dabb2..f3c500b8f 100644
--- a/dev/Cargo.lock
+++ b/dev/Cargo.lock
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4
 
 [[package]]
 name = "adler2"
@@ -149,6 +149,15 @@ version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
 
+[[package]]
+name = "colored"
+version = "3.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "faf9468729b8cbcea668e36183cb69d317348c2e08e994829fb56ebfdfbaac34"
+dependencies = [
+ "windows-sys",
+]
+
 [[package]]
 name = "cpufeatures"
 version = "0.2.17"
@@ -224,16 +233,6 @@ version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 
-[[package]]
-name = "env_filter"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
-dependencies = [
- "log",
- "regex",
-]
-
 [[package]]
 name = "env_home"
 version = "0.1.0"
@@ -246,6 +245,17 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
+[[package]]
+name = "erased-serde"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "89e8918065695684b2b0702da20382d5ae6065cf3327bc2d6436bd49a71ce9f3"
+dependencies = [
+ "serde",
+ "serde_core",
+ "typeid",
+]
+
 [[package]]
 name = "errno"
 version = "0.3.10"
@@ -427,20 +437,79 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
 
 [[package]]
 name = "log"
-version = "0.4.22"
+version = "0.4.29"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
+dependencies = [
+ "sval",
+ "sval_ref",
+ "value-bag",
+]
 
 [[package]]
 name = "logforth"
-version = "0.23.1"
+version = "0.29.1"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "f9b81df91671a0a96902d950498cc69b509291c37f6d49105d5a1b7ddacc727d"
+checksum = "40c105c59828d07aeb95b06f9a345b12869ddc249d44a7302697a66da439076f"
+dependencies = [
+ "logforth-append-file",
+ "logforth-bridge-log",
+ "logforth-core",
+ "logforth-layout-json",
+ "logforth-layout-text",
+]
+
+[[package]]
+name = "logforth-append-file"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "7d2ccb8b7e501c114e80069eb2b83c02a48039c23a7037e717b5b09a4ed306fb"
 dependencies = [
- "anyhow",
- "env_filter",
  "jiff",
+ "logforth-core",
+]
+
+[[package]]
+name = "logforth-bridge-log"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "a4aa6ca548389fd166a995b5940e15b0dacbdd5a30f2f24eac9aa4bf664bda5c"
+dependencies = [
  "log",
+ "logforth-core",
+]
+
+[[package]]
+name = "logforth-core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "a77869b8dba38c67ed19e1753e59d9faefdcc60557bc4e84db0348606a304ac5"
+dependencies = [
+ "anyhow",
+ "value-bag",
+]
+
+[[package]]
+name = "logforth-layout-json"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "01b80d310e0670560404a825f64dbd78a8761c5bb7da952513e90ba9dd525bd2"
+dependencies = [
+ "jiff",
+ "logforth-core",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "logforth-layout-text"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "e2a4674e549a59eeac8e301584143186c433181bdc5460046a130becedef6a3d"
+dependencies = [
+ "colored",
+ "jiff",
+ "logforth-core",
 ]
 
 [[package]]
@@ -618,24 +687,52 @@ checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03"
 
 [[package]]
 name = "serde"
-version = "1.0.216"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
+dependencies = [
+ "serde_core",
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_buf"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "fc948de1bbead18a61be0b33182636603ea0239ca2577b9704fc39eba900e4e5"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
+name = "serde_core"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.216"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
 dependencies = [
  "proc-macro2",
  "quote",
  "syn",
 ]
 
+[[package]]
+name = "serde_fmt"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "6e497af288b3b95d067a23a4f749f2861121ffcb2f6d8379310dcda040c345ed"
+dependencies = [
+ "serde_core",
+]
+
 [[package]]
 name = "serde_json"
 version = "1.0.138"
@@ -675,6 +772,84 @@ version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
+[[package]]
+name = "sval"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "502b8906c4736190684646827fbab1e954357dfe541013bbd7994d033d53a1ca"
+
+[[package]]
+name = "sval_buffer"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "c4b854348b15b6c441bdd27ce9053569b016a0723eab2d015b1fd8e6abe4f708"
+dependencies = [
+ "sval",
+ "sval_ref",
+]
+
+[[package]]
+name = "sval_dynamic"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "a0bd9e8b74410ddad37c6962587c5f9801a2caadba9e11f3f916ee3f31ae4a1f"
+dependencies = [
+ "sval",
+]
+
+[[package]]
+name = "sval_fmt"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "6fe17b8deb33a9441280b4266c2d257e166bafbaea6e66b4b34ca139c91766d9"
+dependencies = [
+ "itoa",
+ "ryu",
+ "sval",
+]
+
+[[package]]
+name = "sval_json"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "854addb048a5bafb1f496c98e0ab5b9b581c3843f03ca07c034ae110d3b7c623"
+dependencies = [
+ "itoa",
+ "ryu",
+ "sval",
+]
+
+[[package]]
+name = "sval_nested"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "96cf068f482108ff44ae8013477cb047a1665d5f1a635ad7cf79582c1845dce9"
+dependencies = [
+ "sval",
+ "sval_buffer",
+ "sval_ref",
+]
+
+[[package]]
+name = "sval_ref"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "ed02126365ffe5ab8faa0abd9be54fbe68d03d607cd623725b0a71541f8aaa6f"
+dependencies = [
+ "sval",
+]
+
+[[package]]
+name = "sval_serde"
+version = "2.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "a263383c6aa2076c4ef6011d3bae1b356edf6ea2613e3d8e8ebaa7b57dd707d5"
+dependencies = [
+ "serde_core",
+ "sval",
+ "sval_nested",
+]
+
 [[package]]
 name = "syn"
 version = "2.0.100"
@@ -719,6 +894,12 @@ dependencies = [
  "winnow",
 ]
 
+[[package]]
+name = "typeid"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c"
+
 [[package]]
 name = "typenum"
 version = "1.17.0"
@@ -743,6 +924,43 @@ version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
 
+[[package]]
+name = "value-bag"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0"
+dependencies = [
+ "value-bag-serde1",
+ "value-bag-sval2",
+]
+
+[[package]]
+name = "value-bag-serde1"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "16530907bfe2999a1773ca5900a65101e092c70f642f25cc23ca0c43573262c5"
+dependencies = [
+ "erased-serde",
+ "serde_buf",
+ "serde_core",
+ "serde_fmt",
+]
+
+[[package]]
+name = "value-bag-sval2"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index";
+checksum = "d00ae130edd690eaa877e4f40605d534790d1cf1d651e7685bd6a144521b251f"
+dependencies = [
+ "sval",
+ "sval_buffer",
+ "sval_dynamic",
+ "sval_fmt",
+ "sval_json",
+ "sval_ref",
+ "sval_serde",
+]
+
 [[package]]
 name = "version_check"
 version = "0.9.5"
