GitHub user HyukjinKwon commented on a diff in the pull request:
https://github.com/apache/spark/pull/19459#discussion_r144600676
--- Diff: python/pyspark/sql/session.py ---
@@ -510,9 +511,43 @@ def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=Tr
         except Exception:
             has_pandas = False
         if has_pandas and isinstance(data, pandas.DataFrame):
-            if schema is None:
-                schema = [str(x) for x in data.columns]
-            data = [r.tolist() for r in data.to_records(index=False)]
+            if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
+                    and len(data) > 0:
+                from pyspark.serializers import ArrowSerializer
+                from pyspark.sql.types import from_arrow_schema
+                import pyarrow as pa
+
+                # Slice the DataFrame into batches
+                split = -(-len(data) // self.sparkContext.defaultParallelism)  # round int up
+                slices = (data[i:i + split] for i in xrange(0, len(data), split))
--- End diff --
Yea, sounds fine.
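
For readers skimming the diff above, a minimal standalone sketch of the ceiling-division slicing it uses; the `pdf` frame and the `parallelism` value below are made up for illustration and are not part of the PR (the PR code uses `xrange`, i.e. Python 2; the sketch uses `range`):

    import pandas as pd

    # Illustrative inputs (not from the PR): 10 rows, hypothetical parallelism of 4.
    pdf = pd.DataFrame({"a": range(10), "b": range(10, 20)})
    parallelism = 4

    # -(-len(pdf) // n) is integer ceiling division, so the rows split into
    # at most `parallelism` slices: -(-10 // 4) == 3 rows per slice here.
    split = -(-len(pdf) // parallelism)

    # Row-wise slices; the last slice may be shorter.
    slices = (pdf[i:i + split] for i in range(0, len(pdf), split))
    print([len(s) for s in slices])  # [3, 3, 3, 1]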
---