This is an automated email from the ASF dual-hosted git repository.

bkietz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new 3360989  ARROW-8315: [Python] Fix dataset tests on Python 3.5
3360989 is described below

commit 3360989e04e99a4fcb50d7e068b6de469ca30716
Author: Antoine Pitrou <[email protected]>
AuthorDate: Thu Apr 2 13:46:37 2020 -0400

    ARROW-8315: [Python] Fix dataset tests on Python 3.5
    
    Do not rely on dict ordering being well-defined (it is not guaranteed on Python 3.5).
    
    Closes #6814 from pitrou/ARROW-8315-dataset-tests-py35
    
    Authored-by: Antoine Pitrou <[email protected]>
    Signed-off-by: Benjamin Kietzman <[email protected]>
---
 python/pyarrow/tests/test_dataset.py | 10 ++++++----
 python/pyarrow/tests/test_schema.py  |  6 +-----
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/python/pyarrow/tests/test_dataset.py b/python/pyarrow/tests/test_dataset.py
index 037ba52..dc28512 100644
--- a/python/pyarrow/tests/test_dataset.py
+++ b/python/pyarrow/tests/test_dataset.py
@@ -625,7 +625,8 @@ def _create_dataset_for_fragments(tempdir, chunk_size=None):
     import pyarrow.parquet as pq
 
     table = pa.table(
-        {'f1': range(8), 'f2': [1] * 8, 'part': ['a'] * 4 + ['b'] * 4}
+        [range(8), [1] * 8, ['a'] * 4 + ['b'] * 4],
+        names=['f1', 'f2', 'part']
     )
     # write_to_dataset currently requires pandas
     pq.write_to_dataset(table, str(tempdir / "test_parquet_dataset"),
@@ -1176,13 +1177,14 @@ def test_specified_schema(tempdir):
 
     # Specifying schema with missing column
     schema = pa.schema([('a', 'int64')])
-    expected = pa.table({'a': [1, 2, 3]})
+    expected = pa.table([[1, 2, 3]], names=['a'])
     _check_dataset(schema, expected)
 
     # Specifying schema with additional column
     schema = pa.schema([('a', 'int64'), ('c', 'int32')])
-    expected = pa.table({'a': [1, 2, 3],
-                         'c': pa.array([None, None, None], type='int32')})
+    expected = pa.table([[1, 2, 3],
+                         pa.array([None, None, None], type='int32')],
+                        names=['a', 'c'])
     _check_dataset(schema, expected)
 
     # Specifying with incompatible schema
diff --git a/python/pyarrow/tests/test_schema.py b/python/pyarrow/tests/test_schema.py
index d2b1bd2..d4cbfb3 100644
--- a/python/pyarrow/tests/test_schema.py
+++ b/python/pyarrow/tests/test_schema.py
@@ -261,8 +261,7 @@ sapien. Quisque pretium vestibulum urna eu vehicula."""
                                     metadata={"key1": "value1"}),
                            pa.field("bar", "string", True,
                                     metadata={"key3": "value3"})],
-                          metadata={"key2": "value2",
-                                    "lorem": lorem})
+                          metadata={"lorem": lorem})
 
     assert my_schema.to_string() == """\
 foo: int32 not null
@@ -272,7 +271,6 @@ bar: string
   -- field metadata --
   key3: 'value3'
 -- schema metadata --
-key2: 'value2'
 lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65)
 
     # Metadata that exactly fits
@@ -292,7 +290,6 @@ bar: string
   -- field metadata --
   key3: 'value3'
 -- schema metadata --
-key2: 'value2'
 lorem: '{}'""".format(lorem)
 
     assert my_schema.to_string(truncate_metadata=False,
@@ -300,7 +297,6 @@ lorem: '{}'""".format(lorem)
 foo: int32 not null
 bar: string
 -- schema metadata --
-key2: 'value2'
 lorem: '{}'""".format(lorem)
 
     assert my_schema.to_string(truncate_metadata=False,

Reply via email to