WeichenXu123 commented on code in PR #40724:
URL: https://github.com/apache/spark/pull/40724#discussion_r1161659422


##########
python/pyspark/ml/torch/distributor.py:
##########
@@ -744,7 +800,44 @@ def run(self, train_object: Union[Callable, str], *args: Any) -> Optional[Any]:
                 TorchDistributor._run_training_on_pytorch_function  # type: ignore
             )
         if self.local_mode:
-            output = self._run_local_training(framework_wrapper_fn, train_object, *args)
+            output = self._run_local_training(framework_wrapper_fn, train_object, *args, **kwargs)
         else:
-            output = self._run_distributed_training(framework_wrapper_fn, train_object, *args)
+            output = self._run_distributed_training(
+                framework_wrapper_fn, train_object, None, *args, **kwargs
+            )
         return output
+
+    def train_on_dataframe(self, train_function, spark_dataframe, *args, **kwargs):
+        if self.local_mode:
+            raise ValueError(
+                "`TorchDistributor.train_on_dataframe` requires setting `TorchDistributor.local_mode` to `False`."
+            )
+
+        return self._run_distributed_training(
+            TorchDistributor._run_training_on_pytorch_function,
+            train_function,
+            spark_dataframe,
+            *args,
+            **kwargs
+        )
+
+
+def get_spark_partition_data_loader(num_samples, batch_size, prefetch=2):

Review Comment:
   This is the default value of `prefetch_factor` in the PyTorch `DataLoader`.
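   For context, a minimal standalone sketch of the PyTorch `DataLoader` parameter this refers to; the toy dataset, batch size, and worker count below are made up purely for illustration:

   ```python
   import torch
   from torch.utils.data import DataLoader, TensorDataset

   if __name__ == "__main__":
       # Toy dataset, only to demonstrate the parameter in question.
       dataset = TensorDataset(torch.arange(100, dtype=torch.float32).unsqueeze(1))

       # In torch.utils.data.DataLoader, `prefetch_factor` defaults to 2 batches
       # prefetched per worker, and it only takes effect when num_workers > 0.
       # The `prefetch=2` default above appears to mirror that.
       loader = DataLoader(dataset, batch_size=8, num_workers=2, prefetch_factor=2)

       for (batch,) in loader:
           pass  # each `batch` has shape (8, 1), except possibly the last one
   ```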




