HyukjinKwon commented on code in PR #43798:
URL: https://github.com/apache/spark/pull/43798#discussion_r1393546849
##########
python/pyspark/sql/readwriter.py:
##########
@@ -1936,7 +1936,23 @@ def parquet(
         if partitionBy is not None:
             self.partitionBy(partitionBy)
         self._set_opts(compression=compression)
-        self._jwrite.parquet(path)
+        if self._df.isEmpty() is True and partitionBy is not None:
+            self._write_empty_partition(path, partitionBy)
+        else:
+            self._jwrite.parquet(path)
+
+    @overload
+    def _write_empty_partition(self, path: str, partitionBy: str) -> None:
+        ...
+
+    @overload
+    def _write_empty_partition(self, path: str, partitionBy: List[str]) -> None:
+        ...
+
+    def _write_empty_partition(self, path: str, partitionBy: Union[str, List[str]]) -> None:
+        temp_row_data = {col: None for col in partitionBy} if isinstance(partitionBy, list) else {partitionBy: None}
+        temp_df = self._df.sparkSession.createDataFrame([Row(**temp_row_data)])
+        temp_df.write.mode("overwrite").partitionBy(partitionBy).parquet(path)
Review Comment:
Does this work with the Scala API too?
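
For reference, a minimal repro of the call this patch intercepts (a sketch assuming a local Spark session; the schema and output path are illustrative, not taken from the PR):

    from pyspark.sql import SparkSession
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType

    spark = SparkSession.builder.master("local[*]").getOrCreate()

    # An empty DataFrame that still declares a partition column.
    schema = StructType([
        StructField("value", IntegerType()),
        StructField("part", StringType()),
    ])
    empty_df = spark.createDataFrame([], schema)

    # With this patch applied, the call below is diverted to
    # _write_empty_partition on the Python side instead of going
    # straight to the JVM writer.
    empty_df.write.mode("overwrite").partitionBy("part").parquet("/tmp/empty_parquet")

Since the change lives only in the Python wrapper, the equivalent Scala call, df.write.partitionBy("part").parquet(path), would presumably bypass this handling entirely.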
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]