pitrou commented on a change in pull request #11724: URL: https://github.com/apache/arrow/pull/11724#discussion_r756803994
########## File path: python/pyarrow/_parquet.pyx ########## @@ -1278,11 +1297,43 @@ cdef shared_ptr[WriterProperties] _create_writer_properties( if isinstance(use_byte_stream_split, bool): if use_byte_stream_split: - props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is not None: + raise ValueError( + "'column_encoding' is not None") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) elif use_byte_stream_split is not None: for column in use_byte_stream_split: - props.encoding(tobytes(column), - ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column]='BYTE_STREAM_SPLIT' + else: + raise ValueError( + "Column {0} is already specified in 'column_encoding'" + .format(column)) Review comment: Here as well, mention what the conflict is about? ########## File path: python/pyarrow/_parquet.pyx ########## @@ -1278,11 +1297,43 @@ cdef shared_ptr[WriterProperties] _create_writer_properties( if isinstance(use_byte_stream_split, bool): if use_byte_stream_split: - props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is not None: + raise ValueError( + "'column_encoding' is not None") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) elif use_byte_stream_split is not None: for column in use_byte_stream_split: - props.encoding(tobytes(column), - ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column]='BYTE_STREAM_SPLIT' + else: + raise ValueError( + "Column {0} is already specified in 'column_encoding'" + .format(column)) + + # column_encoding + # encoding map - encode individual columns + + if column_encoding is not None: + if isinstance(column_encoding, dict): + for column, _encoding in column_encoding.items(): + if encoding_enum_from_name(_encoding) is None: 
+ raise ValueError("Unsupported column encoding: {0}" + .format(_encoding)) Review comment: Why don't you do this check in `encoding_enum_from_name` instead of duplicating it in several places here? ########## File path: python/pyarrow/_parquet.pyx ########## @@ -1278,11 +1297,43 @@ cdef shared_ptr[WriterProperties] _create_writer_properties( if isinstance(use_byte_stream_split, bool): if use_byte_stream_split: - props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is not None: + raise ValueError( + "'column_encoding' is not None") Review comment: This is a rather confusing error message. Perhaps explain to the user what the conflicting options are? ########## File path: python/pyarrow/parquet.py ########## @@ -560,6 +560,15 @@ def _sanitize_table(table, new_schema, flavor): enabled, then dictionary is preferred. The byte_stream_split encoding is valid only for floating-point data types and should be combined with a compression codec. +column_encoding : string or dict, default None + Specify the encoding scheme on a per column basis. + Valid values: {'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', + 'DELTA_BINARY_PACKED', 'DELTA_BYTE_ARRAY'} + Unsupported encodings: DELTA_LENGTH_BYTE_ARRAY, PLAIN_DICTIONARY and + RLE_DICTIONARY. Last two options are already used by default. Review comment: It doesn't seem useful to list the unsupported encodings here; same for the "Last two options" comment, IMHO. ########## File path: python/pyarrow/parquet.py ########## @@ -560,6 +560,15 @@ def _sanitize_table(table, new_schema, flavor): enabled, then dictionary is preferred. The byte_stream_split encoding is valid only for floating-point data types and should be combined with a compression codec. +column_encoding : string or dict, default None + Specify the encoding scheme on a per column basis. 
+ Valid values: {'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', + 'DELTA_BINARY_PACKED', 'DELTA_BYTE_ARRAY'} + Unsupported encodings: DELTA_LENGTH_BYTE_ARRAY, PLAIN_DICTIONARY and + RLE_DICTIONARY. Last two options are already used by default. + Certain encodings are only compatible with certain data types. + Please refer to the encodings section of `Reading and writing Parquet + files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_. Review comment: +1 ########## File path: python/pyarrow/_parquet.pyx ########## @@ -1278,11 +1297,43 @@ cdef shared_ptr[WriterProperties] _create_writer_properties( if isinstance(use_byte_stream_split, bool): if use_byte_stream_split: - props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is not None: + raise ValueError( + "'column_encoding' is not None") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) elif use_byte_stream_split is not None: for column in use_byte_stream_split: - props.encoding(tobytes(column), - ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column]='BYTE_STREAM_SPLIT' + else: + raise ValueError( + "Column {0} is already specified in 'column_encoding'" + .format(column)) + + # column_encoding + # encoding map - encode individual columns + + if column_encoding is not None: + if isinstance(column_encoding, dict): + for column, _encoding in column_encoding.items(): + if encoding_enum_from_name(_encoding) is None: + raise ValueError("Unsupported column encoding: {0}" + .format(_encoding)) + else: + props.encoding(tobytes(column), + encoding_enum_from_name(_encoding)) + elif isinstance(column_encoding, str): + if encoding_enum_from_name(column_encoding) is None: + raise ValueError("Unsupported column encoding: {0}" + .format(column_encoding)) + else: + props.encoding(encoding_enum_from_name(column_encoding)) + else: + raise AttributeError( 
Review comment: Should be `TypeError` ########## File path: python/pyarrow/_parquet.pyx ########## @@ -1278,11 +1297,43 @@ cdef shared_ptr[WriterProperties] _create_writer_properties( if isinstance(use_byte_stream_split, bool): if use_byte_stream_split: - props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is not None: + raise ValueError( + "'column_encoding' is not None") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) elif use_byte_stream_split is not None: for column in use_byte_stream_split: - props.encoding(tobytes(column), - ParquetEncoding_BYTE_STREAM_SPLIT) + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column]='BYTE_STREAM_SPLIT' Review comment: Hmm, can you ensure the code is PEP8-compliant? You should be able to do so with `archery lint --python`. (here, spaces should be added around `=`) -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org