allinux commented on code in PR #857:
URL: https://github.com/apache/datafusion-python/pull/857#discussion_r1748042239


##########
python/datafusion/dataframe.py:
##########
@@ -409,37 +409,62 @@ def except_all(self, other: DataFrame) -> DataFrame:
         """
         return DataFrame(self.df.except_all(other.df))
 
-    def write_csv(self, path: str | pathlib.Path, with_header: bool = False) -> None:
+    def write_csv(
+        self, 
+        path: str | pathlib.Path, 
+        with_header: bool = False,
+        write_options_overwrite: bool = False,
+        write_options_single_file_output: bool = False,
+        write_options_partition_by: List = [],
+    ) -> None:
         """Execute the :py:class:`DataFrame`  and write the results to a CSV 
file.
 
         Args:
             path: Path of the CSV file to write.
             with_header: If true, output the CSV header row.
+            write_options_overwrite: Controls if existing data should be overwritten
+            write_options_single_file_output: Controls if all partitions should be coalesced into a single output file. Generally will have slower performance when set to true.
+            write_options_partition_by: Sets which columns should be used for hive-style partitioned writes by name. Can be set to empty vec![] for non-partitioned writes.
         """
-        self.df.write_csv(str(path), with_header)
+        self.df.write_csv(str(path), with_header, write_options_overwrite, write_options_single_file_output, write_options_partition_by)
 
     def write_parquet(
         self,
         path: str | pathlib.Path,
         compression: str = "uncompressed",
         compression_level: int | None = None,
+        write_options_overwrite: bool = False,
+        write_options_single_file_output: bool = False,
+        write_options_partition_by: List = [],
     ) -> None:
         """Execute the :py:class:`DataFrame` and write the results to a 
Parquet file.
 
         Args:
             path: Path of the Parquet file to write.
             compression: Compression type to use.
             compression_level: Compression level to use.
+            write_options_overwrite: Controls if existing data should be overwritten
+            write_options_single_file_output: Controls if all partitions should be coalesced into a single output file. Generally will have slower performance when set to true.
+            write_options_partition_by: Sets which columns should be used for hive-style partitioned writes by name. Can be set to empty vec![] for non-partitioned writes.
         """
-        self.df.write_parquet(str(path), compression, compression_level)
+        self.df.write_parquet(str(path), compression, compression_level, write_options_overwrite, write_options_single_file_output, write_options_partition_by)
 
-    def write_json(self, path: str | pathlib.Path) -> None:
+    def write_json(
+        self, 
+        path: str | pathlib.Path,
+        write_options_overwrite: bool = False,
+        write_options_single_file_output: bool = False,
+        write_options_partition_by: List = [],
+    ) -> None:
         """Execute the :py:class:`DataFrame` and write the results to a JSON 
file.
 
         Args:
             path: Path of the JSON file to write.
+            write_options_overwrite: Controls if existing data should be overwritten
+            write_options_single_file_output: Controls if all partitions should be coalesced into a single output file. Generally will have slower performance when set to true.
+            write_options_partition_by: Sets which columns should be used for hive-style partitioned writes by name. Can be set to empty vec![] for non-partitioned writes.

Review Comment:
   The docstring is a copy of the Rust comment; the lines containing `vec![]` have been removed.
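
   If it helps reviewers, here is a minimal usage sketch of the new keyword arguments as they appear in this diff (the data, output paths, and column names below are made up purely for illustration, and it assumes the signatures land as shown above):

   ```python
   from datafusion import SessionContext

   # Hypothetical data and paths, purely for illustration.
   ctx = SessionContext()
   df = ctx.from_pydict({"year": [2023, 2024], "value": [1.0, 2.0]})

   # Overwrite any existing output and write hive-style partitions by "year".
   df.write_csv(
       "out/csv",
       with_header=True,
       write_options_overwrite=True,
       write_options_partition_by=["year"],
   )

   # Coalesce all partitions into a single Parquet file (generally slower).
   df.write_parquet(
       "out/data.parquet",
       compression="zstd",
       compression_level=4,
       write_options_single_file_output=True,
   )
   ```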



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

