alamb commented on code in PR #9497:
URL: https://github.com/apache/arrow-rs/pull/9497#discussion_r2955713425


##########
arrow-json/src/reader/list_array.rs:
##########
@@ -93,15 +93,14 @@ impl<O: OffsetSizeTrait> ArrayDecoder for 
ListArrayDecoder<O> {
 
         let child_data = self.decoder.decode(tape, &child_pos)?;
         let nulls = nulls.as_mut().map(|x| NullBuffer::new(x.finish()));
+        let values = make_array(child_data);
+        let field = match &self.data_type {
+            DataType::List(f) | DataType::LargeList(f) => f.clone(),
+            _ => unreachable!(),
+        };
+        let offsets = 
OffsetBuffer::<O>::new(ScalarBuffer::from(offsets.finish()));
 
-        let data = ArrayDataBuilder::new(self.data_type.clone())
-            .len(pos.len())
-            .nulls(nulls)
-            .add_buffer(offsets.finish())
-            .child_data(vec![child_data]);
-
-        // Safety
-        // Validated lengths above
-        Ok(unsafe { data.build_unchecked() })
+        let array = GenericListArray::<O>::try_new(field, offsets, values, 
nulls)?;
+        Ok(array.into_data())

Review Comment:
   Yes, I do think it makes sense — it would be nice to find some way to do
that incrementally, but if not then we may just have to do one big PR 🤔



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to