Copilot commented on code in PR #1029:
URL: https://github.com/apache/mahout/pull/1029#discussion_r2836150740


##########
qdp/qdp-python/src/lib.rs:
##########
@@ -1149,6 +1089,143 @@ impl QdpEngine {
             })?;
         Ok(PyQuantumLoader::new(Some(iter)))
     }
+
+    /// encode directly from a PyTorch CUDA tensor. Internal helper.
+    ///
+    /// Dispatches to the core f32 GPU pointer API for 1D float32 amplitude encoding,
+    /// or to the float64/basis GPU pointer APIs for other dtypes and batch encoding.
+    ///
+    /// Args:
+    ///     data: PyTorch CUDA tensor
+    ///     num_qubits: Number of qubits
+    ///     encoding_method: Encoding strategy (currently only "amplitude")
+    fn _encode_from_cuda_tensor(
+        &self,
+        data: &Bound<'_, PyAny>,
+        num_qubits: usize,
+        encoding_method: &str,
+    ) -> PyResult<QuantumTensor> {
+        // Validate CUDA tensor for direct GPU encoding (shape, contiguity, device, dtype)
+        validate_cuda_tensor_for_encoding(data, self.engine.device().ordinal(), encoding_method)?;
+
+        // Determine dtype for dispatch (float32 vs float64, etc.).
+        let dtype = data.getattr("dtype")?;
+        let dtype_str: String = dtype.str()?.extract()?;
+        let dtype_str_lower = dtype_str.to_ascii_lowercase();
+        let is_f32 = dtype_str_lower.contains("float32");
+        let method = encoding_method.to_ascii_lowercase();
+
+        // Current f32 CUDA path only supports amplitude encoding for 1D tensors.
+        let ndim: usize = data.call_method0("dim")?.extract()?;
+
+        if method.as_str() == "amplitude" && is_f32 {
+            match ndim {
+                1 => {
+                    // 1D CUDA tensor, float32 amplitude encoding using core f32 GPU pointer API.
+                    let input_len: usize = data.call_method0("numel")?.extract()?;
+                    if input_len == 0 {
+                        return Err(PyRuntimeError::new_err("CUDA tensor cannot be empty"));
+                    }
+
+                    let stream_ptr = get_torch_cuda_stream_ptr(data)?;
+                    let data_ptr_u64: u64 = data.call_method0("data_ptr")?.extract()?;
+                    if data_ptr_u64 == 0 {
+                        return Err(PyRuntimeError::new_err(
+                            "PyTorch returned a null data pointer for CUDA tensor",
+                        ));
+                    }
+                    let data_ptr = data_ptr_u64 as *const f32;
+
+                    let ptr = unsafe {
+                        self.engine
+                            .encode_from_gpu_ptr_f32_with_stream(
+                                data_ptr, input_len, num_qubits, stream_ptr,
+                            )
+                            .map_err(|e| {
+                                PyRuntimeError::new_err(format!(
+                                    "Encoding failed (float32 amplitude): {}",
+                                    e
+                                ))
+                            })?
+                    };
+
+                    Ok(QuantumTensor {
+                        ptr,
+                        consumed: false,
+                    })
+                }
+                2 => Err(PyRuntimeError::new_err(
+                    "CUDA float32 batch amplitude encoding is not yet supported. \
Review Comment:
   The error message says "CUDA float32 batch amplitude encoding is not yet 
supported" but this contradicts the fact that the core implementation 
(`encode_batch_from_gpu_ptr_f32`) is fully functional in this PR. Consider 
updating the message to clarify that the Python bindings specifically don't 
expose this yet, not that the underlying core lacks support. For example: "CUDA 
float32 batch amplitude encoding via Python is not yet exposed. Use float64 
(tensor.to(torch.float64)) or encode samples individually."
   ```suggestion
                    "CUDA float32 batch amplitude encoding via Python is not yet exposed. \
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to