This is an automated email from the ASF dual-hosted git repository.
guanmingchiu pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/mahout.git
The following commit(s) were added to refs/heads/main by this push:
new 9e3738e4f [QDP] Add streaming basis encoding (#851)
9e3738e4f is described below
commit 9e3738e4fd030a993db5863ad3a6f64a7338e717
Author: Guan-Ming (Wesley) Chiu <[email protected]>
AuthorDate: Mon Jan 19 19:41:21 2026 +0800
[QDP] Add streaming basis encoding (#851)
---
qdp/qdp-core/src/encoding/amplitude.rs | 128 ++++++++++
qdp/qdp-core/src/encoding/basis.rs | 174 ++++++++++++++
qdp/qdp-core/src/encoding/mod.rs | 372 ++++++++++++++++++++++++++++++
qdp/qdp-core/src/lib.rs | 2 +
qdp/qdp-core/src/platform/linux.rs | 242 +------------------
qdp/qdp-core/src/readers/parquet.rs | 33 ++-
qdp/qdp-python/benchmark/benchmark_e2e.py | 84 ++++---
qdp/qdp-python/uv.lock | 131 +----------
8 files changed, 764 insertions(+), 402 deletions(-)
diff --git a/qdp/qdp-core/src/encoding/amplitude.rs b/qdp/qdp-core/src/encoding/amplitude.rs
new file mode 100644
index 000000000..c308db33c
--- /dev/null
+++ b/qdp/qdp-core/src/encoding/amplitude.rs
@@ -0,0 +1,128 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Amplitude encoding implementation.
+
+use std::ffi::c_void;
+
+use cudarc::driver::{CudaSlice, DevicePtrMut};
+use qdp_kernels::{launch_amplitude_encode_batch, launch_l2_norm_batch};
+
+use super::{ChunkEncoder, STAGE_SIZE_ELEMENTS};
+use crate::gpu::PipelineContext;
+use crate::gpu::memory::PinnedHostBuffer;
+use crate::{MahoutError, QdpEngine, Result};
+
+/// Amplitude encoder state containing the norm buffer.
+pub(crate) struct AmplitudeEncoderState {
+ norm_buffer: CudaSlice<f64>,
+}
+
+/// Amplitude encoding: maps classical vectors to quantum state amplitudes.
+pub(crate) struct AmplitudeEncoder;
+
+impl ChunkEncoder for AmplitudeEncoder {
+ type State = AmplitudeEncoderState;
+
+ fn validate_sample_size(&self, sample_size: usize) -> Result<()> {
+ if sample_size == 0 {
+ return Err(MahoutError::InvalidInput(
+ "Sample size cannot be zero".into(),
+ ));
+ }
+ if sample_size > STAGE_SIZE_ELEMENTS {
+ return Err(MahoutError::InvalidInput(format!(
+ "Sample size {} exceeds staging buffer capacity {}",
+ sample_size, STAGE_SIZE_ELEMENTS
+ )));
+ }
+ Ok(())
+ }
+
+ fn init_state(
+ &self,
+ engine: &QdpEngine,
+ sample_size: usize,
+ _num_qubits: usize,
+ ) -> Result<Self::State> {
+ let max_samples_in_chunk = STAGE_SIZE_ELEMENTS / sample_size;
+ let norm_buffer = engine
+ .device
+ .alloc_zeros::<f64>(max_samples_in_chunk)
+ .map_err(|e| {
+                MahoutError::MemoryAllocation(format!("Failed to allocate norm buffer: {:?}", e))
+ })?;
+ Ok(AmplitudeEncoderState { norm_buffer })
+ }
+
+ fn encode_chunk(
+ &self,
+ state: &mut Self::State,
+ _engine: &QdpEngine,
+ ctx: &PipelineContext,
+ _host_buffer: &PinnedHostBuffer,
+ dev_ptr: u64,
+ samples_in_chunk: usize,
+ sample_size: usize,
+ state_ptr_offset: *mut c_void,
+ state_len: usize,
+ _num_qubits: usize,
+ _global_sample_offset: usize,
+ ) -> Result<()> {
+ unsafe {
+ crate::profile_scope!("GPU::BatchEncode");
+
+ // Compute L2 norms
+ {
+ crate::profile_scope!("GPU::NormBatch");
+ let ret = launch_l2_norm_batch(
+ dev_ptr as *const f64,
+ samples_in_chunk,
+ sample_size,
+ *state.norm_buffer.device_ptr_mut() as *mut f64,
+ ctx.stream_compute.stream as *mut c_void,
+ );
+ if ret != 0 {
+ return Err(MahoutError::KernelLaunch(format!(
+ "Norm kernel error: {}",
+ ret
+ )));
+ }
+ }
+
+ // Encode amplitudes
+ {
+ crate::profile_scope!("GPU::EncodeBatch");
+ let ret = launch_amplitude_encode_batch(
+ dev_ptr as *const f64,
+ state_ptr_offset,
+ *state.norm_buffer.device_ptr_mut() as *const f64,
+ samples_in_chunk,
+ sample_size,
+ state_len,
+ ctx.stream_compute.stream as *mut c_void,
+ );
+ if ret != 0 {
+ return Err(MahoutError::KernelLaunch(format!(
+ "Encode kernel error: {}",
+ ret
+ )));
+ }
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/qdp/qdp-core/src/encoding/basis.rs b/qdp/qdp-core/src/encoding/basis.rs
new file mode 100644
index 000000000..81750fa9a
--- /dev/null
+++ b/qdp/qdp-core/src/encoding/basis.rs
@@ -0,0 +1,174 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Basis encoding implementation.
+
+use std::ffi::c_void;
+
+use cudarc::driver::{CudaSlice, DevicePtr};
+use qdp_kernels::launch_basis_encode_batch;
+
+use super::{ChunkEncoder, STAGE_SIZE_ELEMENTS};
+use crate::gpu::PipelineContext;
+use crate::gpu::memory::PinnedHostBuffer;
+use crate::{MahoutError, QdpEngine, Result};
+
+/// Basis encoder state containing reusable buffers.
+pub(crate) struct BasisEncoderState {
+ /// Reusable CPU buffer for validated indices.
+ indices_cpu: Vec<usize>,
+ /// Reusable GPU buffer for indices.
+ indices_gpu: CudaSlice<usize>,
+}
+
+/// Basis encoding: maps integer indices to computational basis states.
+pub(crate) struct BasisEncoder;
+
+impl ChunkEncoder for BasisEncoder {
+ type State = BasisEncoderState;
+
+ fn needs_staging_copy(&self) -> bool {
+ // Basis encoding validates indices on CPU and uploads directly,
+ // so we don't need the staging buffer H2D copy.
+ false
+ }
+
+ fn validate_sample_size(&self, sample_size: usize) -> Result<()> {
+ if sample_size != 1 {
+ return Err(MahoutError::InvalidInput(format!(
+                "Basis encoding requires sample_size=1 (one index per sample), got {}",
+ sample_size
+ )));
+ }
+ Ok(())
+ }
+
+ fn init_state(
+ &self,
+ engine: &QdpEngine,
+ sample_size: usize,
+ _num_qubits: usize,
+ ) -> Result<Self::State> {
+        // For basis encoding, sample_size is always 1, so max samples = STAGE_SIZE_ELEMENTS
+ let max_samples_in_chunk = STAGE_SIZE_ELEMENTS / sample_size;
+
+ // Pre-allocate CPU buffer for indices
+ let indices_cpu = Vec::with_capacity(max_samples_in_chunk);
+
+ // Pre-allocate GPU buffer for indices
+ let indices_gpu =
+            unsafe { engine.device.alloc::<usize>(max_samples_in_chunk) }.map_err(|e| {
+ MahoutError::MemoryAllocation(format!(
+ "Failed to allocate GPU indices buffer: {:?}",
+ e
+ ))
+ })?;
+
+ Ok(BasisEncoderState {
+ indices_cpu,
+ indices_gpu,
+ })
+ }
+
+ fn encode_chunk(
+ &self,
+ state: &mut Self::State,
+ engine: &QdpEngine,
+ ctx: &PipelineContext,
+ host_buffer: &PinnedHostBuffer,
+ _dev_ptr: u64,
+ samples_in_chunk: usize,
+ _sample_size: usize,
+ state_ptr_offset: *mut c_void,
+ state_len: usize,
+ num_qubits: usize,
+ global_sample_offset: usize,
+ ) -> Result<()> {
+ unsafe {
+ crate::profile_scope!("GPU::BatchEncode");
+
+ // Clear and reuse CPU buffer for validated indices
+ state.indices_cpu.clear();
+
+ // Validate and convert indices on CPU
+            let data_slice = std::slice::from_raw_parts(host_buffer.ptr(), samples_in_chunk);
+ for (i, &val) in data_slice.iter().enumerate() {
+ if !val.is_finite() {
+ return Err(MahoutError::InvalidInput(format!(
+ "Sample {}: basis index must be finite",
+ global_sample_offset + i
+ )));
+ }
+ if val < 0.0 {
+ return Err(MahoutError::InvalidInput(format!(
+ "Sample {}: basis index must be non-negative",
+ global_sample_offset + i
+ )));
+ }
+ if val.fract() != 0.0 {
+ return Err(MahoutError::InvalidInput(format!(
+ "Sample {}: basis index must be an integer, got {}",
+ global_sample_offset + i,
+ val
+ )));
+ }
+ let index = val as usize;
+ if index >= state_len {
+ return Err(MahoutError::InvalidInput(format!(
+                        "Sample {}: basis index {} exceeds state size {} (max: {})",
+ global_sample_offset + i,
+ index,
+ state_len,
+ state_len - 1
+ )));
+ }
+ state.indices_cpu.push(index);
+ }
+
+            // Copy indices to pre-allocated GPU buffer (slice to match actual chunk size)
+            let mut gpu_slice = state.indices_gpu.slice_mut(0..samples_in_chunk);
+ engine
+ .device
+ .htod_sync_copy_into(&state.indices_cpu, &mut gpu_slice)
+ .map_err(|e| {
+ MahoutError::MemoryAllocation(format!(
+ "Failed to upload basis indices to GPU: {:?}",
+ e
+ ))
+ })?;
+
+ // Launch basis encoding kernel
+ {
+ crate::profile_scope!("GPU::BasisEncodeBatch");
+ let ret = launch_basis_encode_batch(
+ *state.indices_gpu.device_ptr() as *const usize,
+ state_ptr_offset,
+ samples_in_chunk,
+ state_len,
+ num_qubits as u32,
+ ctx.stream_compute.stream as *mut c_void,
+ );
+ if ret != 0 {
+ return Err(MahoutError::KernelLaunch(format!(
+ "Basis encode kernel error: {}",
+ ret
+ )));
+ }
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/qdp/qdp-core/src/encoding/mod.rs b/qdp/qdp-core/src/encoding/mod.rs
new file mode 100644
index 000000000..df69941ea
--- /dev/null
+++ b/qdp/qdp-core/src/encoding/mod.rs
@@ -0,0 +1,372 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Streaming encoding implementations for different quantum encoding methods.
+
+mod amplitude;
+mod basis;
+
+use std::ffi::c_void;
+use std::sync::Arc;
+use std::sync::mpsc::{Receiver, SyncSender, sync_channel};
+use std::thread::{self, JoinHandle};
+
+use cudarc::driver::{CudaDevice, DevicePtr};
+
+/// Guard that ensures GPU synchronization and IO thread cleanup on drop.
+/// Used to handle early returns in `stream_encode`.
+struct CleanupGuard<'a> {
+ device: &'a Arc<CudaDevice>,
+ io_handle: Option<JoinHandle<()>>,
+}
+
+impl<'a> CleanupGuard<'a> {
+ fn new(device: &'a Arc<CudaDevice>, io_handle: JoinHandle<()>) -> Self {
+ Self {
+ device,
+ io_handle: Some(io_handle),
+ }
+ }
+
+ /// Defuse the guard and return the IO handle for explicit cleanup.
+ /// After calling this, drop() will not perform cleanup.
+ fn defuse(mut self) -> JoinHandle<()> {
+ self.io_handle.take().expect("IO handle already taken")
+ }
+}
+
+impl Drop for CleanupGuard<'_> {
+ fn drop(&mut self) {
+ // Best-effort cleanup on early return
+ let _ = self.device.synchronize();
+ if let Some(handle) = self.io_handle.take() {
+ let _ = handle.join();
+ }
+ }
+}
+
+use crate::dlpack::DLManagedTensor;
+use crate::gpu::PipelineContext;
+use crate::gpu::memory::{GpuStateVector, PinnedHostBuffer};
+use crate::reader::StreamingDataReader;
+use crate::{MahoutError, QdpEngine, Result};
+
+/// 512MB staging buffer for large Parquet row groups (reduces fragmentation)
+pub(crate) const STAGE_SIZE_BYTES: usize = 512 * 1024 * 1024;
+pub(crate) const STAGE_SIZE_ELEMENTS: usize = STAGE_SIZE_BYTES / std::mem::size_of::<f64>();
+
+pub(crate) type FullBufferResult = std::result::Result<(PinnedHostBuffer, usize), MahoutError>;
+pub(crate) type FullBufferChannel = (SyncSender<FullBufferResult>, Receiver<FullBufferResult>);
+
+/// Trait for chunk-based quantum state encoding.
+///
+/// Implementations provide the encoding-specific logic while the shared
+/// streaming pipeline handles IO, buffering, and GPU memory management.
+pub(crate) trait ChunkEncoder {
+ /// Encoder-specific state (e.g., norm buffer for amplitude encoding).
+ type State;
+
+ /// Validate that the sample size is appropriate for this encoding method.
+ fn validate_sample_size(&self, sample_size: usize) -> Result<()>;
+
+ /// Whether this encoder needs the staging buffer H2D copy.
+ ///
+ /// If false, the streaming pipeline will skip the async copy to device
+ /// staging buffer, avoiding unnecessary memory bandwidth overhead.
+ /// Encoders that process data on CPU before uploading should return false.
+ fn needs_staging_copy(&self) -> bool {
+ true
+ }
+
+ /// Initialize encoder-specific state.
+ fn init_state(
+ &self,
+ engine: &QdpEngine,
+ sample_size: usize,
+ num_qubits: usize,
+ ) -> Result<Self::State>;
+
+ /// Encode a chunk of samples to quantum states.
+ ///
+ /// # Arguments
+ /// * `state` - Encoder-specific state
+ /// * `engine` - QDP engine for GPU operations
+ /// * `ctx` - Pipeline context for async operations
+ /// * `host_buffer` - Pinned host buffer containing input data
+ /// * `dev_ptr` - Device pointer to staging buffer with copied data
+ /// * `samples_in_chunk` - Number of samples in this chunk
+ /// * `sample_size` - Size of each sample in f64 elements
+ /// * `state_ptr_offset` - Pointer to output location in state vector
+ /// * `state_len` - Length of each quantum state (2^num_qubits)
+ /// * `num_qubits` - Number of qubits
+ #[allow(clippy::too_many_arguments)]
+ fn encode_chunk(
+ &self,
+ state: &mut Self::State,
+ engine: &QdpEngine,
+ ctx: &PipelineContext,
+ host_buffer: &PinnedHostBuffer,
+ dev_ptr: u64,
+ samples_in_chunk: usize,
+ sample_size: usize,
+ state_ptr_offset: *mut c_void,
+ state_len: usize,
+ num_qubits: usize,
+ global_sample_offset: usize,
+ ) -> Result<()>;
+}
+
+/// Shared streaming pipeline for encoding data from Parquet files.
+///
+/// This function handles all the common IO, buffering, and GPU memory
+/// management logic. The actual encoding is delegated to the `ChunkEncoder`.
+pub(crate) fn stream_encode<E: ChunkEncoder>(
+ engine: &QdpEngine,
+ path: &str,
+ num_qubits: usize,
+ encoder: E,
+) -> Result<*mut DLManagedTensor> {
+ // Initialize reader
+ let mut reader_core = crate::io::ParquetBlockReader::new(path, None)?;
+ let num_samples = reader_core.total_rows;
+
+ // Allocate output state vector
+    let total_state_vector = GpuStateVector::new_batch(&engine.device, num_samples, num_qubits)?;
+ const PIPELINE_EVENT_SLOTS: usize = 2;
+ let ctx = PipelineContext::new(&engine.device, PIPELINE_EVENT_SLOTS)?;
+
+ // Check if encoder needs staging buffers before allocating
+ let needs_staging_copy = encoder.needs_staging_copy();
+
+ // Double-buffered device staging (only allocated if needed)
+ let dev_staging = if needs_staging_copy {
+        let dev_in_a = unsafe { engine.device.alloc::<f64>(STAGE_SIZE_ELEMENTS) }
+ .map_err(|e| MahoutError::MemoryAllocation(format!("{:?}", e)))?;
+        let dev_in_b = unsafe { engine.device.alloc::<f64>(STAGE_SIZE_ELEMENTS) }
+ .map_err(|e| MahoutError::MemoryAllocation(format!("{:?}", e)))?;
+ Some((dev_in_a, dev_in_b))
+ } else {
+ None
+ };
+
+ // Channel setup for async IO
+ let (full_buf_tx, full_buf_rx): FullBufferChannel = sync_channel(2);
+    let (empty_buf_tx, empty_buf_rx): (SyncSender<PinnedHostBuffer>, _) = sync_channel(2);
+
+ // Read first chunk to determine sample size
+ let mut host_buf_first = PinnedHostBuffer::new(STAGE_SIZE_ELEMENTS)?;
+ let first_len = reader_core.read_chunk(host_buf_first.as_slice_mut())?;
+
+ let sample_size = reader_core
+ .get_sample_size()
+        .ok_or_else(|| MahoutError::InvalidInput("Could not determine sample size".into()))?;
+
+ // Validate sample size for this encoder
+ encoder.validate_sample_size(sample_size)?;
+
+ // Initialize encoder-specific state
+    let mut encoder_state = encoder.init_state(engine, sample_size, num_qubits)?;
+
+ let state_len = 1 << num_qubits;
+
+ // Send first buffer to processing
+ full_buf_tx
+ .send(Ok((host_buf_first, first_len)))
+ .map_err(|_| MahoutError::Io("Failed to send first buffer".into()))?;
+
+ // Send second empty buffer for IO thread
+ empty_buf_tx
+ .send(PinnedHostBuffer::new(STAGE_SIZE_ELEMENTS)?)
+ .map_err(|_| MahoutError::Io("Failed to send second buffer".into()))?;
+
+ // Spawn IO thread
+ let mut reader = reader_core;
+ let io_handle = thread::spawn(move || {
+ loop {
+ let mut buffer = match empty_buf_rx.recv() {
+ Ok(b) => b,
+ Err(_) => break,
+ };
+
+ let result = reader
+ .read_chunk(buffer.as_slice_mut())
+ .map(|len| (buffer, len));
+
+ let should_break = match &result {
+ Ok((_, len)) => *len == 0,
+ Err(_) => true,
+ };
+
+ if full_buf_tx.send(result).is_err() {
+ break;
+ }
+
+ if should_break {
+ break;
+ }
+ }
+ });
+
+ // Create cleanup guard to ensure resources are released on early return
+ let cleanup_guard = CleanupGuard::new(&engine.device, io_handle);
+
+ // Main processing loop
+ let mut global_sample_offset: usize = 0;
+ let mut use_dev_a = true;
+
+ loop {
+ let (host_buffer, current_len) = match full_buf_rx.recv() {
+ Ok(Ok((buffer, len))) => (buffer, len),
+ Ok(Err(e)) => return Err(e),
+            Err(_) => return Err(MahoutError::Io("IO thread disconnected".into())),
+ };
+
+ if current_len == 0 {
+ break;
+ }
+
+ if current_len % sample_size != 0 {
+ return Err(MahoutError::InvalidInput(format!(
+ "Chunk length {} is not a multiple of sample size {}",
+ current_len, sample_size
+ )));
+ }
+
+ let samples_in_chunk = current_len / sample_size;
+ if samples_in_chunk > 0 {
+ let event_slot = if use_dev_a { 0 } else { 1 };
+ // Get device pointer from staging buffers (0 if not allocated)
+ let dev_ptr = dev_staging
+ .as_ref()
+ .map(|(a, b)| {
+ if use_dev_a {
+ *a.device_ptr()
+ } else {
+ *b.device_ptr()
+ }
+ })
+ .unwrap_or(0);
+
+ unsafe {
+ crate::profile_scope!("GPU::Dispatch");
+
+ // Async copy to device (only if staging buffers are allocated)
+ if dev_staging.is_some() {
+ ctx.async_copy_to_device(
+ host_buffer.ptr() as *const c_void,
+ dev_ptr as *mut c_void,
+ current_len,
+ )?;
+ ctx.record_copy_done(event_slot)?;
+ ctx.wait_for_copy(event_slot)?;
+ }
+
+ // Calculate output offset
+                let offset_elements = global_sample_offset.checked_mul(state_len).ok_or_else(|| {
+ MahoutError::MemoryAllocation(format!(
+ "Offset calculation overflow: {} * {}",
+ global_sample_offset, state_len
+ ))
+ })?;
+
+ let offset_bytes = offset_elements
+                    .checked_mul(std::mem::size_of::<qdp_kernels::CuDoubleComplex>())
+ .ok_or_else(|| {
+ MahoutError::MemoryAllocation(format!(
+ "Offset bytes calculation overflow: {} * {}",
+ offset_elements,
+ std::mem::size_of::<qdp_kernels::CuDoubleComplex>()
+ ))
+ })?;
+
+ let state_ptr_offset = total_state_vector
+ .ptr_void()
+ .cast::<u8>()
+ .add(offset_bytes)
+ .cast::<c_void>();
+
+ // Delegate to encoder
+ encoder.encode_chunk(
+ &mut encoder_state,
+ engine,
+ &ctx,
+ &host_buffer,
+ dev_ptr,
+ samples_in_chunk,
+ sample_size,
+ state_ptr_offset,
+ state_len,
+ num_qubits,
+ global_sample_offset,
+ )?;
+
+ if dev_staging.is_some() {
+ ctx.sync_copy_stream()?;
+ }
+ }
+
+ global_sample_offset = global_sample_offset
+ .checked_add(samples_in_chunk)
+ .ok_or_else(|| {
+ MahoutError::MemoryAllocation(format!(
+ "Sample offset overflow: {} + {}",
+ global_sample_offset, samples_in_chunk
+ ))
+ })?;
+ use_dev_a = !use_dev_a;
+ }
+
+ let _ = empty_buf_tx.send(host_buffer);
+ }
+
+ // Defuse guard for explicit cleanup with proper error handling
+ let io_handle = cleanup_guard.defuse();
+
+ engine
+ .device
+ .synchronize()
+ .map_err(|e| MahoutError::Cuda(format!("{:?}", e)))?;
+ io_handle
+ .join()
+ .map_err(|e| MahoutError::Io(format!("IO thread panicked: {:?}", e)))?;
+
+ let dlpack_ptr = total_state_vector.to_dlpack();
+ Ok(dlpack_ptr)
+}
+
+/// Encode data from a Parquet file using the specified encoding method.
+pub(crate) fn encode_from_parquet(
+ engine: &QdpEngine,
+ path: &str,
+ num_qubits: usize,
+ encoding_method: &str,
+) -> Result<*mut DLManagedTensor> {
+ match encoding_method {
+ "amplitude" => {
+ crate::profile_scope!("Mahout::EncodeAmplitudeFromParquet");
+            stream_encode(engine, path, num_qubits, amplitude::AmplitudeEncoder)
+ }
+ "basis" => {
+ crate::profile_scope!("Mahout::EncodeBasisFromParquet");
+ stream_encode(engine, path, num_qubits, basis::BasisEncoder)
+ }
+ _ => Err(MahoutError::NotImplemented(format!(
+ "Encoding method '{}' not supported for streaming",
+ encoding_method
+ ))),
+ }
+}
diff --git a/qdp/qdp-core/src/lib.rs b/qdp/qdp-core/src/lib.rs
index cb44ef36b..257916a34 100644
--- a/qdp/qdp-core/src/lib.rs
+++ b/qdp/qdp-core/src/lib.rs
@@ -15,6 +15,8 @@
// limitations under the License.
pub mod dlpack;
+#[cfg(target_os = "linux")]
+mod encoding;
pub mod error;
pub mod gpu;
pub mod io;
diff --git a/qdp/qdp-core/src/platform/linux.rs b/qdp/qdp-core/src/platform/linux.rs
index d38efd379..7bea3f456 100644
--- a/qdp/qdp-core/src/platform/linux.rs
+++ b/qdp/qdp-core/src/platform/linux.rs
@@ -14,246 +14,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::ffi::c_void;
-use std::sync::mpsc::{Receiver, SyncSender, sync_channel};
-use std::thread;
+//! Linux platform-specific implementations.
+//!
+//! This module provides the platform entry point for Parquet streaming encoding.
+//! The actual encoding implementations are in the `crate::encoding` module.
use crate::dlpack::DLManagedTensor;
-use crate::gpu::PipelineContext;
-use crate::gpu::memory::{GpuStateVector, PinnedHostBuffer};
-use crate::reader::StreamingDataReader;
-use crate::{MahoutError, QdpEngine, Result};
-use cudarc::driver::{DevicePtr, DevicePtrMut};
-use qdp_kernels::{launch_amplitude_encode_batch, launch_l2_norm_batch};
-
-/// 512MB staging buffer for large Parquet row groups (reduces fragmentation)
-const STAGE_SIZE_BYTES: usize = 512 * 1024 * 1024;
-const STAGE_SIZE_ELEMENTS: usize = STAGE_SIZE_BYTES / std::mem::size_of::<f64>();
-type FullBufferResult = std::result::Result<(PinnedHostBuffer, usize), MahoutError>;
-type FullBufferChannel = (SyncSender<FullBufferResult>, Receiver<FullBufferResult>);
+use crate::{QdpEngine, Result};
+/// Encode data from a Parquet file using the specified encoding method.
+///
+/// This is the Linux platform entry point that delegates to the encoding module.
pub(crate) fn encode_from_parquet(
engine: &QdpEngine,
path: &str,
num_qubits: usize,
encoding_method: &str,
) -> Result<*mut DLManagedTensor> {
- crate::profile_scope!("Mahout::EncodeFromParquet");
-
- if encoding_method != "amplitude" {
- return Err(MahoutError::NotImplemented(
- "Only amplitude encoding supported for streaming".into(),
- ));
- }
-
- let mut reader_core = crate::io::ParquetBlockReader::new(path, None)?;
- let num_samples = reader_core.total_rows;
-
- let total_state_vector = GpuStateVector::new_batch(&engine.device,
num_samples, num_qubits)?;
- const PIPELINE_EVENT_SLOTS: usize = 2; // matches double-buffered staging
buffers
- let ctx = PipelineContext::new(&engine.device, PIPELINE_EVENT_SLOTS)?;
-
- let dev_in_a = unsafe { engine.device.alloc::<f64>(STAGE_SIZE_ELEMENTS) }
- .map_err(|e| MahoutError::MemoryAllocation(format!("{:?}", e)))?;
- let dev_in_b = unsafe { engine.device.alloc::<f64>(STAGE_SIZE_ELEMENTS) }
- .map_err(|e| MahoutError::MemoryAllocation(format!("{:?}", e)))?;
-
- let (full_buf_tx, full_buf_rx): FullBufferChannel = sync_channel(2);
- let (empty_buf_tx, empty_buf_rx): (SyncSender<PinnedHostBuffer>,
Receiver<PinnedHostBuffer>) =
- sync_channel(2);
-
- let mut host_buf_first = PinnedHostBuffer::new(STAGE_SIZE_ELEMENTS)?;
- let first_len = reader_core.read_chunk(host_buf_first.as_slice_mut())?;
-
- let sample_size = reader_core
- .get_sample_size()
- .ok_or_else(|| MahoutError::InvalidInput("Could not determine sample
size".into()))?;
-
- if sample_size == 0 {
- return Err(MahoutError::InvalidInput(
- "Sample size cannot be zero".into(),
- ));
- }
- if sample_size > STAGE_SIZE_ELEMENTS {
- return Err(MahoutError::InvalidInput(format!(
- "Sample size {} exceeds staging buffer capacity {}",
- sample_size, STAGE_SIZE_ELEMENTS
- )));
- }
-
- let max_samples_in_chunk = STAGE_SIZE_ELEMENTS / sample_size;
- let mut norm_buffer = engine
- .device
- .alloc_zeros::<f64>(max_samples_in_chunk)
- .map_err(|e| {
- MahoutError::MemoryAllocation(format!("Failed to allocate norm
buffer: {:?}", e))
- })?;
-
- full_buf_tx
- .send(Ok((host_buf_first, first_len)))
- .map_err(|_| MahoutError::Io("Failed to send first buffer".into()))?;
-
- empty_buf_tx
- .send(PinnedHostBuffer::new(STAGE_SIZE_ELEMENTS)?)
- .map_err(|_| MahoutError::Io("Failed to send second buffer".into()))?;
-
- let mut reader = reader_core;
- let io_handle = thread::spawn(move || {
- loop {
- let mut buffer = match empty_buf_rx.recv() {
- Ok(b) => b,
- Err(_) => break,
- };
-
- let result = reader
- .read_chunk(buffer.as_slice_mut())
- .map(|len| (buffer, len));
-
- let should_break = match &result {
- Ok((_, len)) => *len == 0,
- Err(_) => true,
- };
-
- if full_buf_tx.send(result).is_err() {
- break;
- }
-
- if should_break {
- break;
- }
- }
- });
-
- let mut global_sample_offset: usize = 0;
- let mut use_dev_a = true;
- let state_len_per_sample = 1 << num_qubits;
-
- loop {
- let (host_buffer, current_len) = match full_buf_rx.recv() {
- Ok(Ok((buffer, len))) => (buffer, len),
- Ok(Err(e)) => return Err(e),
- Err(_) => return Err(MahoutError::Io("IO thread
disconnected".into())),
- };
-
- if current_len == 0 {
- break;
- }
-
- if current_len % sample_size != 0 {
- return Err(MahoutError::InvalidInput(format!(
- "Chunk length {} is not a multiple of sample size {}",
- current_len, sample_size
- )));
- }
-
- let samples_in_chunk = current_len / sample_size;
- if samples_in_chunk > 0 {
- let event_slot = if use_dev_a { 0 } else { 1 };
- let dev_ptr = if use_dev_a {
- *dev_in_a.device_ptr()
- } else {
- *dev_in_b.device_ptr()
- };
-
- unsafe {
- crate::profile_scope!("GPU::Dispatch");
-
- ctx.async_copy_to_device(
- host_buffer.ptr() as *const c_void,
- dev_ptr as *mut c_void,
- current_len,
- )?;
- ctx.record_copy_done(event_slot)?;
- ctx.wait_for_copy(event_slot)?;
-
- {
- crate::profile_scope!("GPU::BatchEncode");
- let offset_elements = global_sample_offset
- .checked_mul(state_len_per_sample)
- .ok_or_else(|| {
- MahoutError::MemoryAllocation(format!(
- "Offset calculation overflow: {} * {}",
- global_sample_offset, state_len_per_sample
- ))
- })?;
-
- let offset_bytes = offset_elements
-
.checked_mul(std::mem::size_of::<qdp_kernels::CuDoubleComplex>())
- .ok_or_else(|| {
- MahoutError::MemoryAllocation(format!(
- "Offset bytes calculation overflow: {} * {}",
- offset_elements,
-
std::mem::size_of::<qdp_kernels::CuDoubleComplex>()
- ))
- })?;
-
- let state_ptr_offset = total_state_vector
- .ptr_void()
- .cast::<u8>()
- .add(offset_bytes)
- .cast::<std::ffi::c_void>();
-
- {
- crate::profile_scope!("GPU::NormBatch");
- let ret = launch_l2_norm_batch(
- dev_ptr as *const f64,
- samples_in_chunk,
- sample_size,
- *norm_buffer.device_ptr_mut() as *mut f64,
- ctx.stream_compute.stream as *mut c_void,
- );
- if ret != 0 {
- return Err(MahoutError::KernelLaunch(format!(
- "Norm kernel error: {}",
- ret
- )));
- }
- }
-
- {
- crate::profile_scope!("GPU::EncodeBatch");
- let ret = launch_amplitude_encode_batch(
- dev_ptr as *const f64,
- state_ptr_offset,
- *norm_buffer.device_ptr() as *const f64,
- samples_in_chunk,
- sample_size,
- state_len_per_sample,
- ctx.stream_compute.stream as *mut c_void,
- );
- if ret != 0 {
- return Err(MahoutError::KernelLaunch(format!(
- "Encode kernel error: {}",
- ret
- )));
- }
- }
- }
-
- ctx.sync_copy_stream()?;
- }
- global_sample_offset = global_sample_offset
- .checked_add(samples_in_chunk)
- .ok_or_else(|| {
- MahoutError::MemoryAllocation(format!(
- "Sample offset overflow: {} + {}",
- global_sample_offset, samples_in_chunk
- ))
- })?;
- use_dev_a = !use_dev_a;
- }
-
- let _ = empty_buf_tx.send(host_buffer);
- }
-
- engine
- .device
- .synchronize()
- .map_err(|e| MahoutError::Cuda(format!("{:?}", e)))?;
- io_handle
- .join()
- .map_err(|e| MahoutError::Io(format!("IO thread panicked: {:?}", e)))?;
-
- let dlpack_ptr = total_state_vector.to_dlpack();
- Ok(dlpack_ptr)
+    crate::encoding::encode_from_parquet(engine, path, num_qubits, encoding_method)
}
diff --git a/qdp/qdp-core/src/readers/parquet.rs b/qdp/qdp-core/src/readers/parquet.rs
index 5322d120e..db1148c55 100644
--- a/qdp/qdp-core/src/readers/parquet.rs
+++ b/qdp/qdp-core/src/readers/parquet.rs
@@ -303,9 +303,12 @@ impl ParquetStreamingReader {
)));
}
}
+ DataType::Float64 => {
+ // Scalar Float64 for basis encoding (one index per sample)
+ }
_ => {
return Err(MahoutError::InvalidInput(format!(
-                    "Expected List<Float64> or FixedSizeList<Float64> column, got {:?}",
+                    "Expected Float64, List<Float64>, or FixedSizeList<Float64> column, got {:?}",
field.data_type()
)));
}
@@ -484,9 +487,35 @@ impl StreamingDataReader for ParquetStreamingReader {
(current_sample_size, batch_values)
}
+ DataType::Float64 => {
+                            // Scalar Float64 for basis encoding (one index per sample)
+ let float_array = column
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .ok_or_else(|| {
+ MahoutError::Io(
+                                        "Failed to downcast to Float64Array".to_string(),
+ )
+ })?;
+
+ if float_array.is_empty() {
+ continue;
+ }
+
+ let current_sample_size = 1;
+
+ let mut batch_values = Vec::new();
+ if float_array.null_count() == 0 {
+                                batch_values.extend_from_slice(float_array.values());
+ } else {
+                                return Err(MahoutError::Io("Null value encountered in Float64Array during quantum encoding. Please check data quality at the source.".to_string()));
+ }
+
+ (current_sample_size, batch_values)
+ }
_ => {
return Err(MahoutError::Io(format!(
-                            "Expected List<Float64> or FixedSizeList<Float64>, got {:?}",
+                            "Expected Float64, List<Float64>, or FixedSizeList<Float64>, got {:?}",
column.data_type()
)));
}
diff --git a/qdp/qdp-python/benchmark/benchmark_e2e.py b/qdp/qdp-python/benchmark/benchmark_e2e.py
index 4e22dfb58..28e0afbd5 100644
--- a/qdp/qdp-python/benchmark/benchmark_e2e.py
+++ b/qdp/qdp-python/benchmark/benchmark_e2e.py
@@ -40,7 +40,7 @@ import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.ipc as ipc
from _qdp import QdpEngine
-from utils import generate_batch_data
+from utils import generate_batch_data, normalize_batch
# Competitors
try:
@@ -93,17 +93,22 @@ def generate_data(n_qubits, n_samples, encoding_method: str = "amplitude"):
# Generate all data at once
all_data = generate_batch_data(n_samples, dim, encoding_method, seed=42)
- # Save as Parquet (List format for PennyLane/Qiskit)
- feature_vectors = [row.tolist() for row in all_data]
- table = pa.table(
-        {"feature_vector": pa.array(feature_vectors, type=pa.list_(pa.float64()))}
- )
+ # Save as Parquet
+ if encoding_method == "basis":
+ # For basis encoding, save single scalar indices (not lists)
+        table = pa.table({"index": pa.array(all_data.flatten(), type=pa.float64())})
+ else:
+        # For amplitude encoding, use List format for PennyLane/Qiskit compatibility
+ feature_vectors = [row.tolist() for row in all_data]
+ table = pa.table(
+            {"feature_vector": pa.array(feature_vectors, type=pa.list_(pa.float64()))}
+ )
pq.write_table(table, DATA_FILE)
# Save as Arrow IPC (FixedSizeList format for Mahout)
if encoding_method == "basis":
- # For basis encoding, create a simple array of indices
- arr = pa.array(all_data.flatten(), type=pa.float64())
+        # For basis encoding, use FixedSizeList(len=1) for Mahout Arrow reader compatibility
+        arr = pa.FixedSizeListArray.from_arrays(pa.array(all_data.flatten()), 1)
arrow_table = pa.table({"data": arr})
else:
# For amplitude encoding, use FixedSizeList format
@@ -154,9 +159,7 @@ def run_qiskit(n_qubits, n_samples):
batch = raw_data[i : i + BATCH_SIZE]
# Normalize
- norms = np.linalg.norm(batch, axis=1, keepdims=True)
- norms[norms == 0] = 1.0
- batch = batch / norms
+ batch = normalize_batch(batch)
# State preparation
batch_states = []
@@ -193,7 +196,7 @@ def run_qiskit(n_qubits, n_samples):
# -----------------------------------------------------------
# 2. PennyLane Full Pipeline
# -----------------------------------------------------------
-def run_pennylane(n_qubits, n_samples):
+def run_pennylane(n_qubits, n_samples, encoding_method: str = "amplitude"):
if not HAS_PENNYLANE:
print("\n[PennyLane] Not installed, skipping.")
return 0.0, None
@@ -201,17 +204,22 @@ def run_pennylane(n_qubits, n_samples):
# Clean cache before starting benchmark
clean_cache()
- print("\n[PennyLane] Full Pipeline (Disk -> GPU)...")
+ print(f"\n[PennyLane] Full Pipeline (Disk -> GPU) - {encoding_method}
encoding...")
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev, interface="torch")
- def circuit(inputs):
+ def amplitude_circuit(inputs):
qml.AmplitudeEmbedding(
features=inputs, wires=range(n_qubits), normalize=True,
pad_with=0.0
)
return qml.state()
+ @qml.qnode(dev, interface="torch")
+ def basis_circuit(basis_state):
+ qml.BasisEmbedding(features=basis_state, wires=range(n_qubits))
+ return qml.state()
+
model = DummyQNN(n_qubits).cuda()
torch.cuda.synchronize()
@@ -221,7 +229,10 @@ def run_pennylane(n_qubits, n_samples):
import pandas as pd
df = pd.read_parquet(DATA_FILE)
- raw_data = np.stack(df["feature_vector"].values)
+ if encoding_method == "basis":
+ raw_data = df["index"].values.astype(np.int64)
+ else:
+ raw_data = np.stack(df["feature_vector"].values)
io_time = time.perf_counter() - start_time
print(f" IO Time: {io_time:.4f} s")
@@ -229,13 +240,22 @@ def run_pennylane(n_qubits, n_samples):
# Process batches
for i in range(0, n_samples, BATCH_SIZE):
- batch_cpu = torch.tensor(raw_data[i : i + BATCH_SIZE])
-
- # Execute QNode
- try:
- state_cpu = circuit(batch_cpu)
- except Exception:
- state_cpu = torch.stack([circuit(x) for x in batch_cpu])
+ if encoding_method == "basis":
+ batch_indices = raw_data[i : i + BATCH_SIZE]
+ # Convert indices to binary representation for BasisEmbedding
+ batch_states = []
+ for idx in batch_indices:
+ binary_list = [int(b) for b in format(int(idx),
f"0{n_qubits}b")]
+ state_cpu = basis_circuit(binary_list)
+ batch_states.append(state_cpu)
+ state_cpu = torch.stack(batch_states)
+ else:
+ batch_cpu = torch.tensor(raw_data[i : i + BATCH_SIZE])
+ # Execute QNode
+ try:
+ state_cpu = amplitude_circuit(batch_cpu)
+ except Exception:
+ state_cpu = torch.stack([amplitude_circuit(x) for x in
batch_cpu])
all_pl_states.append(state_cpu)
@@ -273,13 +293,7 @@ def run_mahout_parquet(engine, n_qubits, n_samples,
encoding_method: str = "ampl
# Direct Parquet to GPU pipeline
parquet_encode_start = time.perf_counter()
- try:
- qtensor = engine.encode(DATA_FILE, n_qubits, encoding_method)
- except RuntimeError as e:
- if "Only amplitude encoding supported" in str(e):
- print("Basis encoding not supported for streaming from Parquet,
skipping.")
- return 0.0, None
- raise
+ qtensor = engine.encode(DATA_FILE, n_qubits, encoding_method)
parquet_encode_time = time.perf_counter() - parquet_encode_start
print(f" Parquet->GPU (IO+Encode): {parquet_encode_time:.4f} s")
@@ -330,13 +344,7 @@ def run_mahout_arrow(engine, n_qubits, n_samples,
encoding_method: str = "amplit
start_time = time.perf_counter()
arrow_encode_start = time.perf_counter()
- try:
- qtensor = engine.encode(ARROW_FILE, n_qubits, encoding_method)
- except RuntimeError as e:
- if "Only amplitude encoding supported" in str(e):
- print(" Basis encoding not supported for streaming from Arrow,
skipping.")
- return 0.0, None
- raise
+ qtensor = engine.encode(ARROW_FILE, n_qubits, encoding_method)
arrow_encode_time = time.perf_counter() - arrow_encode_start
print(f" Arrow->GPU (IO+Encode): {arrow_encode_time:.4f} s")
@@ -462,7 +470,9 @@ if __name__ == "__main__":
# Run benchmarks
if "pennylane" in args.frameworks:
- t_pl, pl_all_states = run_pennylane(args.qubits, args.samples)
+ t_pl, pl_all_states = run_pennylane(
+ args.qubits, args.samples, args.encoding_method
+ )
# Clean cache between framework benchmarks
clean_cache()
diff --git a/qdp/qdp-python/uv.lock b/qdp/qdp-python/uv.lock
index 36450e241..46fa970b1 100644
--- a/qdp/qdp-python/uv.lock
+++ b/qdp/qdp-python/uv.lock
@@ -283,18 +283,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl",
hash =
"sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size
= 119668, upload-time = "2025-04-16T00:41:47.671Z" },
]
-[[package]]
-name = "exceptiongroup"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions", marker = "python_full_version < '3.11'" },
-]
-sdist = { url =
"https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz",
hash =
"sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size
= 30371, upload-time = "2025-11-21T23:01:54.787Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl",
hash =
"sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size
= 16740, upload-time = "2025-11-21T23:01:53.443Z" },
-]
-
[[package]]
name = "filelock"
version = "3.20.0"
@@ -459,15 +447,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl",
hash =
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size
= 71008, upload-time = "2025-10-12T14:55:18.883Z" },
]
-[[package]]
-name = "iniconfig"
-version = "2.3.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url =
"https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz",
hash =
"sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size
= 20503, upload-time = "2025-10-18T21:55:43.219Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl",
hash =
"sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size
= 7484, upload-time = "2025-10-18T21:55:41.639Z" },
-]
-
[[package]]
name = "jinja2"
version = "3.1.6"
@@ -716,30 +695,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/73/e4/6d6f14b2a759c622f191b2d67e9075a3f56aaccb3be4bb9bb6890030d0a0/matplotlib-3.10.8-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl",
hash =
"sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2", size
= 8713867, upload-time = "2025-12-10T22:56:48.954Z" },
]
-[[package]]
-name = "maturin"
-version = "1.10.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "tomli", marker = "python_full_version < '3.11'" },
-]
-sdist = { url =
"https://files.pythonhosted.org/packages/02/44/c593afce7d418ae6016b955c978055232359ad28c707a9ac6643fc60512d/maturin-1.10.2.tar.gz",
hash =
"sha256:259292563da89850bf8f7d37aa4ddba22905214c1e180b1c8f55505dfd8c0e81", size
= 217835, upload-time = "2025-11-19T11:53:17.348Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/15/74/7f7e93019bb71aa072a7cdf951cbe4c9a8d5870dd86c66ec67002153487f/maturin-1.10.2-py3-none-linux_armv6l.whl",
hash =
"sha256:11c73815f21a755d2129c410e6cb19dbfacbc0155bfc46c706b69930c2eb794b", size
= 8763201, upload-time = "2025-11-19T11:52:42.98Z" },
- { url =
"https://files.pythonhosted.org/packages/4a/85/1d1b64dbb6518ee633bfde8787e251ae59428818fea7a6bdacb8008a09bd/maturin-1.10.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl",
hash =
"sha256:7fbd997c5347649ee7987bd05a92bd5b8b07efa4ac3f8bcbf6196e07eb573d89", size
= 17072583, upload-time = "2025-11-19T11:52:45.636Z" },
- { url =
"https://files.pythonhosted.org/packages/7c/45/2418f0d6e1cbdf890205d1dc73ebea6778bb9ce80f92e866576c701ded72/maturin-1.10.2-py3-none-macosx_10_12_x86_64.whl",
hash =
"sha256:e3ce9b2ad4fb9c341f450a6d32dc3edb409a2d582a81bc46ba55f6e3b6196b22", size
= 8827021, upload-time = "2025-11-19T11:52:48.143Z" },
- { url =
"https://files.pythonhosted.org/packages/7f/83/14c96ddc93b38745d8c3b85126f7d78a94f809a49dc9644bb22b0dc7b78c/maturin-1.10.2-py3-none-manylinux_2_12_i686.manylinux2010_i686.musllinux_1_1_i686.whl",
hash =
"sha256:f0d1b7b5f73c8d30a7e71cd2a2189a7f0126a3a3cd8b3d6843e7e1d4db50f759", size
= 8751780, upload-time = "2025-11-19T11:52:51.613Z" },
- { url =
"https://files.pythonhosted.org/packages/46/8d/753148c0d0472acd31a297f6d11c3263cd2668d38278ed29d523625f7290/maturin-1.10.2-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.musllinux_1_1_x86_64.whl",
hash =
"sha256:efcd496a3202ffe0d0489df1f83d08b91399782fb2dd545d5a1e7bf6fd81af39", size
= 9241884, upload-time = "2025-11-19T11:52:53.946Z" },
- { url =
"https://files.pythonhosted.org/packages/b9/f9/f5ca9fe8cad70cac6f3b6008598cc708f8a74dd619baced99784a6253f23/maturin-1.10.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl",
hash =
"sha256:a41ec70d99e27c05377be90f8e3c3def2a7bae4d0d9d5ea874aaf2d1da625d5c", size
= 8671736, upload-time = "2025-11-19T11:52:57.133Z" },
- { url =
"https://files.pythonhosted.org/packages/0a/76/f59cbcfcabef0259c3971f8b5754c85276a272028d8363386b03ec4e9947/maturin-1.10.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl",
hash =
"sha256:07a82864352feeaf2167247c8206937ef6c6ae9533025d416b7004ade0ea601d", size
= 8633475, upload-time = "2025-11-19T11:53:00.389Z" },
- { url =
"https://files.pythonhosted.org/packages/53/40/96cd959ad1dda6c12301860a74afece200a3209d84b393beedd5d7d915c0/maturin-1.10.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.musllinux_1_1_ppc64le.whl",
hash =
"sha256:04df81ee295dcda37828bd025a4ac688ea856e3946e4cb300a8f44a448de0069", size
= 11177118, upload-time = "2025-11-19T11:53:03.014Z" },
- { url =
"https://files.pythonhosted.org/packages/e5/b6/144f180f36314be183f5237011528f0e39fe5fd2e74e65c3b44a5795971e/maturin-1.10.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl",
hash =
"sha256:96e1d391e4c1fa87edf2a37e4d53d5f2e5f39dd880b9d8306ac9f8eb212d23f8", size
= 9320218, upload-time = "2025-11-19T11:53:05.39Z" },
- { url =
"https://files.pythonhosted.org/packages/eb/2d/2c483c1b3118e2e10fd8219d5291843f5f7c12284113251bf506144a3ac1/maturin-1.10.2-py3-none-manylinux_2_31_riscv64.whl",
hash =
"sha256:a217aa7c42aa332fb8e8377eb07314e1f02cf0fe036f614aca4575121952addd", size
= 8985266, upload-time = "2025-11-19T11:53:07.618Z" },
- { url =
"https://files.pythonhosted.org/packages/1d/98/1d0222521e112cd058b56e8d96c72cf9615f799e3b557adb4b16004f42aa/maturin-1.10.2-py3-none-win32.whl",
hash =
"sha256:da031771d9fb6ddb1d373638ec2556feee29e4507365cd5749a2d354bcadd818", size
= 7667897, upload-time = "2025-11-19T11:53:10.14Z" },
- { url =
"https://files.pythonhosted.org/packages/a0/ec/c6c973b1def0d04533620b439d5d7aebb257657ba66710885394514c8045/maturin-1.10.2-py3-none-win_amd64.whl",
hash =
"sha256:da777766fd584440dc9fecd30059a94f85e4983f58b09e438ae38ee4b494024c", size
= 8908416, upload-time = "2025-11-19T11:53:12.862Z" },
- { url =
"https://files.pythonhosted.org/packages/1b/01/7da60c9f7d5dc92dfa5e8888239fd0fb2613ee19e44e6db5c2ed5595fab3/maturin-1.10.2-py3-none-win_arm64.whl",
hash =
"sha256:a4c29a770ea2c76082e0afc6d4efd8ee94405588bfae00d10828f72e206c739b", size
= 7506680, upload-time = "2025-11-19T11:53:15.403Z" },
-]
-
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -1057,22 +1012,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/86/41/585a168330ff063014880a80d744219dbf1dd7a1c706e75ab3425a987384/pandas-2.3.3-cp312-cp312-win_amd64.whl",
hash =
"sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b", size
= 10992722, upload-time = "2025-09-29T23:20:54.139Z" },
]
-[[package]]
-name = "patchelf"
-version = "0.17.2.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url =
"https://files.pythonhosted.org/packages/2c/a3/fdd3fa938c864aa2f11dd0b7f08befeda983d2dcdee44da493c6977a653f/patchelf-0.17.2.4.tar.gz",
hash =
"sha256:970ee5cd8af33e5ea2099510b2f9013fa1b8d5cd763bf3fd3961281c18101a09", size
= 149629, upload-time = "2025-07-23T21:16:32.071Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/b1/a7/8c4f86c78ec03db954d05fd9c57a114cc3a172a2d3e4a8b949cd5ff89471/patchelf-0.17.2.4-py3-none-macosx_10_9_universal2.whl",
hash =
"sha256:343bb1b94e959f9070ca9607453b04390e36bbaa33c88640b989cefad0aa049e", size
= 184436, upload-time = "2025-07-23T21:16:20.578Z" },
- { url =
"https://files.pythonhosted.org/packages/0b/6d/2e9f5483cdb352fab36b8076667b062b2d79cb09d2e3fd09b6fca5771cb6/patchelf-0.17.2.4-py3-none-manylinux1_i686.manylinux_2_5_i686.musllinux_1_1_i686.whl",
hash =
"sha256:09fd848d625a165fc7b7e07745508c24077129b019c4415a882938781d43adf8", size
= 547318, upload-time = "2025-07-23T21:16:22.135Z" },
- { url =
"https://files.pythonhosted.org/packages/7e/19/f7821ef31aab01fa7dc8ebe697ece88ec4f7a0fdd3155dab2dfee4b00e5c/patchelf-0.17.2.4-py3-none-manylinux1_x86_64.manylinux_2_5_x86_64.musllinux_1_1_x86_64.whl",
hash =
"sha256:d9b35ebfada70c02679ad036407d9724ffe1255122ba4ac5e4be5868618a5689", size
= 482846, upload-time = "2025-07-23T21:16:23.73Z" },
- { url =
"https://files.pythonhosted.org/packages/d1/50/107fea848ecfd851d473b079cab79107487d72c4c3cdb25b9d2603a24ca2/patchelf-0.17.2.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.musllinux_1_1_aarch64.whl",
hash =
"sha256:2931a1b5b85f3549661898af7bf746afbda7903c7c9a967cfc998a3563f84fad", size
= 477811, upload-time = "2025-07-23T21:16:25.145Z" },
- { url =
"https://files.pythonhosted.org/packages/89/a9/a9a2103e159fd65bffbc21ecc5c8c36e44eb34fe53b4ef85fb6d08c2a635/patchelf-0.17.2.4-py3-none-manylinux2014_armv7l.manylinux_2_17_armv7l.musllinux_1_1_armv7l.whl",
hash =
"sha256:ae44cb3c857d50f54b99e5697aa978726ada33a8a6129d4b8b7ffd28b996652d", size
= 431226, upload-time = "2025-07-23T21:16:26.765Z" },
- { url =
"https://files.pythonhosted.org/packages/87/93/897d612f6df7cfd987bdf668425127efeff8d8e4ad8bfbab1c69d2a0d861/patchelf-0.17.2.4-py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.musllinux_1_1_ppc64le.whl",
hash =
"sha256:680a266a70f60a7a4f4c448482c5bdba80cc8e6bb155a49dcc24238ba49927b0", size
= 540276, upload-time = "2025-07-23T21:16:27.983Z" },
- { url =
"https://files.pythonhosted.org/packages/5d/b8/2b92d11533482bac9ee989081d6880845287751b5f528adbd6bb27667fbd/patchelf-0.17.2.4-py3-none-manylinux2014_s390x.manylinux_2_17_s390x.musllinux_1_1_s390x.whl",
hash =
"sha256:d842b51f0401460f3b1f3a3a67d2c266a8f515a5adfbfa6e7b656cb3ac2ed8bc", size
= 596632, upload-time = "2025-07-23T21:16:29.253Z" },
- { url =
"https://files.pythonhosted.org/packages/14/e2/975d4bdb418f942b53e6187b95bd9e0d5e0488b7bc214685a1e43e2c2751/patchelf-0.17.2.4-py3-none-manylinux_2_31_riscv64.musllinux_1_1_riscv64.whl",
hash =
"sha256:7076d9e127230982e20a81a6e2358d3343004667ba510d9f822d4fdee29b0d71", size
= 508281, upload-time = "2025-07-23T21:16:30.865Z" },
-]
-
[[package]]
name = "pennylane"
version = "0.42.3"
@@ -1234,15 +1173,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl",
hash =
"sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size
= 6997850, upload-time = "2025-10-15T18:24:11.495Z" },
]
-[[package]]
-name = "pluggy"
-version = "1.6.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url =
"https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz",
hash =
"sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size
= 69412, upload-time = "2025-05-15T12:30:07.975Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl",
hash =
"sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size
= 20538, upload-time = "2025-05-15T12:30:06.134Z" },
-]
-
[[package]]
name = "protobuf"
version = "6.33.3"
@@ -1321,24 +1251,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl",
hash =
"sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82", size
= 121793, upload-time = "2025-12-23T03:14:02.103Z" },
]
-[[package]]
-name = "pytest"
-version = "9.0.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
- { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
- { name = "iniconfig" },
- { name = "packaging" },
- { name = "pluggy" },
- { name = "pygments" },
- { name = "tomli", marker = "python_full_version < '3.11'" },
-]
-sdist = { url =
"https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz",
hash =
"sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size
= 1564125, upload-time = "2025-11-12T13:05:09.333Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl",
hash =
"sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size
= 373668, upload-time = "2025-11-12T13:05:07.379Z" },
-]
-
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -1444,13 +1356,6 @@ benchmark = [
{ name = "torch" },
{ name = "tqdm" },
]
-dev = [
- { name = "maturin" },
- { name = "numpy" },
- { name = "patchelf" },
- { name = "pytest" },
- { name = "torch" },
-]
[package.metadata]
@@ -1465,16 +1370,9 @@ benchmark = [
{ name = "qiskit-aer", specifier = ">=0.17.2" },
{ name = "scikit-learn", specifier = ">=1.3" },
{ name = "tensorflow", specifier = ">=2.20" },
- { name = "torch", specifier = ">=2.2" },
+ { name = "torch", specifier = ">=2.2,<=2.9.0" },
{ name = "tqdm" },
]
-dev = [
- { name = "maturin", specifier = ">=1.10.2" },
- { name = "numpy", specifier = ">=1.24,<2.0" },
- { name = "patchelf", specifier = ">=0.17.2.4" },
- { name = "pytest", specifier = ">=9.0.1" },
- { name = "torch", specifier = ">=2.2" },
-]
[[package]]
name = "requests"
@@ -1813,33 +1711,6 @@ wheels = [
{ url =
"https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl",
hash =
"sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size
= 18638, upload-time = "2025-03-13T13:49:21.846Z" },
]
-[[package]]
-name = "tomli"
-version = "2.4.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url =
"https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz",
hash =
"sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size
= 17477, upload-time = "2026-01-11T11:22:38.165Z" }
-wheels = [
- { url =
"https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl",
hash =
"sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size
= 153663, upload-time = "2026-01-11T11:21:45.27Z" },
- { url =
"https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl",
hash =
"sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size
= 148469, upload-time = "2026-01-11T11:21:46.873Z" },
- { url =
"https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl",
hash =
"sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size
= 236039, upload-time = "2026-01-11T11:21:48.503Z" },
- { url =
"https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl",
hash =
"sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size
= 243007, upload-time = "2026-01-11T11:21:49.456Z" },
- { url =
"https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl",
hash =
"sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size
= 240875, upload-time = "2026-01-11T11:21:50.755Z" },
- { url =
"https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl",
hash =
"sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size
= 246271, upload-time = "2026-01-11T11:21:51.81Z" },
- { url =
"https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl",
hash =
"sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size
= 96770, upload-time = "2026-01-11T11:21:52.647Z" },
- { url =
"https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl",
hash =
"sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size
= 107626, upload-time = "2026-01-11T11:21:53.459Z" },
- { url =
"https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl",
hash =
"sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size
= 94842, upload-time = "2026-01-11T11:21:54.831Z" },
- { url =
"https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl",
hash =
"sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size
= 154894, upload-time = "2026-01-11T11:21:56.07Z" },
- { url =
"https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl",
hash =
"sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size
= 149053, upload-time = "2026-01-11T11:21:57.467Z" },
- { url =
"https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl",
hash =
"sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size
= 243481, upload-time = "2026-01-11T11:21:58.661Z" },
- { url =
"https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl",
hash =
"sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size
= 251720, upload-time = "2026-01-11T11:22:00.178Z" },
- { url =
"https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl",
hash =
"sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size
= 247014, upload-time = "2026-01-11T11:22:01.238Z" },
- { url =
"https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl",
hash =
"sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size
= 251820, upload-time = "2026-01-11T11:22:02.727Z" },
- { url =
"https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl",
hash =
"sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size
= 97712, upload-time = "2026-01-11T11:22:03.777Z" },
- { url =
"https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl",
hash =
"sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size
= 108296, upload-time = "2026-01-11T11:22:04.86Z" },
- { url =
"https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl",
hash =
"sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size
= 94553, upload-time = "2026-01-11T11:22:05.854Z" },
- { url =
"https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl",
hash =
"sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size
= 14477, upload-time = "2026-01-11T11:22:37.446Z" },
-]
-
[[package]]
name = "tomlkit"
version = "0.13.3"