Xuanwo commented on code in PR #5617:
URL: https://github.com/apache/opendal/pull/5617#discussion_r1952059430


##########
core/src/services/hdfs_native/lister.rs:
##########
@@ -15,28 +15,27 @@
 // specific language governing permissions and limitations
 // under the License.
 
-use std::sync::Arc;
+use std::collections::VecDeque;
 
 use crate::raw::oio;
-use crate::raw::oio::Entry;
-use crate::*;
+use crate::Result;
 
 pub struct HdfsNativeLister {
-    _path: String,
-    _client: Arc<hdfs_native::Client>,
+    entries: VecDeque<oio::Entry>,
 }
 
 impl HdfsNativeLister {
-    pub fn new(path: String, client: Arc<hdfs_native::Client>) -> Self {
-        HdfsNativeLister {
-            _path: path,
-            _client: client,
-        }
+    pub fn new(entries: VecDeque<oio::Entry>) -> Self {
+        HdfsNativeLister { entries }
     }
 }
 
 impl oio::List for HdfsNativeLister {
-    async fn next(&mut self) -> Result<Option<Entry>> {
-        todo!()
+    async fn next(&mut self) -> Result<Option<oio::Entry>> {

Review Comment:
   We should call the `list_status` API here instead, so we don't have to store 
all entries in memory before the user calls `next`.
   
   In the future, we could also support using `LIST_BATCH` from HDFS, allowing 
users to call `list_status` multiple times to fetch all entries.
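   
   A rough sketch of calling the listing API lazily from `next`, assuming hdfs_native exposes an async `Client::list_status(path, recursive)` and that `FileStatus` carries a `path` field (both should be double-checked against the crate docs; the `FileStatus` -> `oio::Entry` mapping is only stubbed here):
   
   ```rust
   use std::collections::VecDeque;
   use std::sync::Arc;
   
   use crate::raw::oio;
   use crate::services::hdfs_native::error::parse_hdfs_error;
   use crate::*;
   
   pub struct HdfsNativeLister {
       client: Arc<hdfs_native::Client>,
       path: String,
       /// Filled lazily on the first `next` call instead of at construction time.
       entries: Option<VecDeque<oio::Entry>>,
   }
   
   impl oio::List for HdfsNativeLister {
       async fn next(&mut self) -> Result<Option<oio::Entry>> {
           if self.entries.is_none() {
               // Defer the listing call until the user actually polls the lister.
               let statuses = self
                   .client
                   .list_status(&self.path, false)
                   .await
                   .map_err(parse_hdfs_error)?;
               self.entries = Some(
                   statuses
                       .into_iter()
                       // Stubbed mapping: a real implementation would derive the
                       // entry mode and metadata from the returned status.
                       .map(|s| oio::Entry::new(&s.path, Metadata::new(EntryMode::FILE)))
                       .collect(),
               );
           }
           Ok(self.entries.as_mut().unwrap().pop_front())
       }
   }
   ```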



##########
core/src/services/hdfs_native/writer.rs:
##########
@@ -15,28 +15,41 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use bytes::{Buf, Bytes};
 use hdfs_native::file::FileWriter;
 
-use crate::raw::oio;
+use crate::raw::*;
+use crate::services::hdfs_native::error::parse_hdfs_error;
 use crate::*;
 
 pub struct HdfsNativeWriter {
-    _f: FileWriter,
+    f: FileWriter,
 }
 
 impl HdfsNativeWriter {
     pub fn new(f: FileWriter) -> Self {
-        HdfsNativeWriter { _f: f }
+        HdfsNativeWriter { f }
     }
 }
 
 impl oio::Write for HdfsNativeWriter {
-    async fn write(&mut self, _bs: Buffer) -> Result<()> {
-        todo!()
+    async fn write(&mut self, mut bs: Buffer) -> Result<()> {
+        while bs.has_remaining() {
+            let n = self
+                .f
+                .write(Bytes::copy_from_slice(bs.chunk()))

Review Comment:
   `Buffer` itself is an iterator of `Bytes`, so we can simply do:
   
   ```rust
   for chunk in bs {
       self.f.write(chunk).await.map_err(parse_hdfs_error)?;
   }
   ```



##########
core/src/services/hdfs_native/reader.rs:
##########
@@ -15,23 +15,62 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use bytes::Bytes;
+use bytes::BytesMut;
 use hdfs_native::file::FileReader;
+use tokio::io::ReadBuf;
 
 use crate::raw::*;
+use crate::services::hdfs_native::error::parse_hdfs_error;
 use crate::*;
 
 pub struct HdfsNativeReader {
-    _f: FileReader,
+    f: FileReader,
+    read: usize,
+    size: usize,
+    buf_size: usize,
+    buf: BytesMut,
 }
 
 impl HdfsNativeReader {
     pub fn new(f: FileReader) -> Self {
-        HdfsNativeReader { _f: f }
+        HdfsNativeReader {
+            f,
+            read: 0,
+            size: 0,
+            buf_size: 0,
+            buf: BytesMut::new(),
+        }
     }
 }
 
 impl oio::Read for HdfsNativeReader {
     async fn read(&mut self) -> Result<Buffer> {
-        todo!()
+        if self.read >= self.size {
+            return Ok(Buffer::new());
+        }
+
+        let size = (self.size - self.read).min(self.buf_size);
+        self.buf.reserve(size);
+
+        let buf = &mut self.buf.spare_capacity_mut()[..size];
+        let mut read_buf: ReadBuf = ReadBuf::uninit(buf);
+
+        // SAFETY: Read at most `limit` bytes into `read_buf`.
+        unsafe {
+            read_buf.assume_init(size);
+        }
+
+        let len = read_buf.initialize_unfilled().len();
+        let bytes: Bytes = self.f.read(len).await.map_err(parse_hdfs_error)?;

Review Comment:
   If the file returns `Bytes` directly, there's no need to clone it; we can simply return it to the user.
   
   I believe most of the code related to `self.buf` can be removed.
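   
   A minimal sketch of that simplification, assuming `FileReader::read(len)` keeps returning an owned `Bytes` as it does in this PR (the `chunk` field and its sizing are illustrative, not part of the existing code):
   
   ```rust
   use hdfs_native::file::FileReader;
   
   use crate::raw::*;
   use crate::services::hdfs_native::error::parse_hdfs_error;
   use crate::*;
   
   pub struct HdfsNativeReader {
       f: FileReader,
       /// How many bytes have been handed out so far.
       read: usize,
       /// Total number of bytes this reader should return.
       size: usize,
       /// Preferred size of a single `read` call.
       chunk: usize,
   }
   
   impl oio::Read for HdfsNativeReader {
       async fn read(&mut self) -> Result<Buffer> {
           if self.read >= self.size {
               return Ok(Buffer::new());
           }
   
           let len = (self.size - self.read).min(self.chunk);
           // `FileReader::read` already returns an owned `Bytes`, so there is
           // no need to copy it into an intermediate `BytesMut` buffer.
           let bytes = self.f.read(len).await.map_err(parse_hdfs_error)?;
           self.read += bytes.len();
           Ok(Buffer::from(bytes))
       }
   }
   ```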



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
