pitrou commented on a change in pull request #9702:
URL: https://github.com/apache/arrow/pull/9702#discussion_r786735652



##########
File path: cpp/src/arrow/adapters/orc/adapter.cc
##########
@@ -628,41 +733,86 @@ class ArrowOutputStream : public liborc::OutputStream {
   int64_t length_;
 };
 
+Result<liborc::WriterOptions> MakeOrcWriterOptions(
+    arrow::adapters::orc::WriteOptions options) {
+  liborc::WriterOptions orc_options_;
+  orc_options_.setFileVersion(
+      liborc::FileVersion(static_cast<uint32_t>(options.file_version.major()),
+                          
static_cast<uint32_t>(options.file_version.minor())));
+  orc_options_.setStripeSize(static_cast<uint64_t>(options.stripe_size));
+  orc_options_.setCompressionBlockSize(
+      static_cast<uint64_t>(options.compression_block_size));
+  orc_options_.setCompressionStrategy(static_cast<liborc::CompressionStrategy>(
+      static_cast<int8_t>(options.compression_strategy)));
+  
orc_options_.setRowIndexStride(static_cast<uint64_t>(options.row_index_stride));
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  
orc_options_.setDictionaryKeySizeThreshold(options.dictionary_key_size_threshold);
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  std::set<uint64_t> orc_bloom_filter_columns_;
+  std::for_each(options.bloom_filter_columns.begin(), 
options.bloom_filter_columns.end(),
+                [&orc_bloom_filter_columns_](const int64_t col) {
+                  orc_bloom_filter_columns_.insert(static_cast<uint64_t>(col));
+                });
+  orc_options_.setColumnsUseBloomFilter(orc_bloom_filter_columns_);
+  orc_options_.setBloomFilterFPP(options.bloom_filter_fpp);
+  switch (options.compression) {
+    case Compression::UNCOMPRESSED:
+      
orc_options_.setCompression(liborc::CompressionKind::CompressionKind_NONE);
+      break;
+    case Compression::GZIP:
+      
orc_options_.setCompression(liborc::CompressionKind::CompressionKind_ZLIB);
+      break;
+    case Compression::SNAPPY:
+      
orc_options_.setCompression(liborc::CompressionKind::CompressionKind_SNAPPY);
+      break;
+    case Compression::LZ4:
+      
orc_options_.setCompression(liborc::CompressionKind::CompressionKind_LZ4);
+      break;
+    case Compression::ZSTD:
+      
orc_options_.setCompression(liborc::CompressionKind::CompressionKind_ZSTD);
+      break;
+    default:
+      return Status::Invalid("Compression type not supported by ORC");
+  }
+  return orc_options_;
+}
+
 }  // namespace
 
 class ORCFileWriter::Impl {
  public:
-  Status Open(arrow::io::OutputStream* output_stream) {
+  Status Open(arrow::io::OutputStream* output_stream, const WriteOptions& 
write_options) {
     out_stream_ = std::unique_ptr<liborc::OutputStream>(
         checked_cast<liborc::OutputStream*>(new 
ArrowOutputStream(*output_stream)));
+    write_options_ = write_options;
     return Status::OK();
   }
 
   Status Write(const Table& table) {
-    std::unique_ptr<liborc::WriterOptions> orc_options =
-        std::unique_ptr<liborc::WriterOptions>(new liborc::WriterOptions());
     ARROW_ASSIGN_OR_RAISE(auto orc_schema, GetOrcType(*(table.schema())));
+    ARROW_ASSIGN_OR_RAISE(auto orc_options_, 
MakeOrcWriterOptions(write_options_));
+    auto batch_size_ = static_cast<uint64_t>(write_options_.batch_size);

Review comment:
       If this is a local variable, it shouldn't end with an underscore, hence 
`batch_size`.

##########
File path: cpp/src/arrow/adapters/orc/adapter.cc
##########
@@ -628,41 +733,86 @@ class ArrowOutputStream : public liborc::OutputStream {
   int64_t length_;
 };
 
+Result<liborc::WriterOptions> MakeOrcWriterOptions(
+    arrow::adapters::orc::WriteOptions options) {
+  liborc::WriterOptions orc_options_;
+  orc_options_.setFileVersion(
+      liborc::FileVersion(static_cast<uint32_t>(options.file_version.major()),
+                          
static_cast<uint32_t>(options.file_version.minor())));
+  orc_options_.setStripeSize(static_cast<uint64_t>(options.stripe_size));
+  orc_options_.setCompressionBlockSize(
+      static_cast<uint64_t>(options.compression_block_size));
+  orc_options_.setCompressionStrategy(static_cast<liborc::CompressionStrategy>(
+      static_cast<int8_t>(options.compression_strategy)));
+  
orc_options_.setRowIndexStride(static_cast<uint64_t>(options.row_index_stride));
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  
orc_options_.setDictionaryKeySizeThreshold(options.dictionary_key_size_threshold);
+  orc_options_.setPaddingTolerance(options.padding_tolerance);

Review comment:
       Why is this set twice?

##########
File path: cpp/src/arrow/adapters/orc/adapter.cc
##########
@@ -628,41 +733,86 @@ class ArrowOutputStream : public liborc::OutputStream {
   int64_t length_;
 };
 
+Result<liborc::WriterOptions> MakeOrcWriterOptions(
+    arrow::adapters::orc::WriteOptions options) {
+  liborc::WriterOptions orc_options_;
+  orc_options_.setFileVersion(
+      liborc::FileVersion(static_cast<uint32_t>(options.file_version.major()),
+                          
static_cast<uint32_t>(options.file_version.minor())));
+  orc_options_.setStripeSize(static_cast<uint64_t>(options.stripe_size));
+  orc_options_.setCompressionBlockSize(
+      static_cast<uint64_t>(options.compression_block_size));
+  orc_options_.setCompressionStrategy(static_cast<liborc::CompressionStrategy>(
+      static_cast<int8_t>(options.compression_strategy)));
+  
orc_options_.setRowIndexStride(static_cast<uint64_t>(options.row_index_stride));
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  
orc_options_.setDictionaryKeySizeThreshold(options.dictionary_key_size_threshold);
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  std::set<uint64_t> orc_bloom_filter_columns_;
+  std::for_each(options.bloom_filter_columns.begin(), 
options.bloom_filter_columns.end(),
+                [&orc_bloom_filter_columns_](const int64_t col) {
+                  orc_bloom_filter_columns_.insert(static_cast<uint64_t>(col));
+                });
+  orc_options_.setColumnsUseBloomFilter(orc_bloom_filter_columns_);

Review comment:
       ```suggestion
     
orc_options_.setColumnsUseBloomFilter(std::move(orc_bloom_filter_columns_));
   ```

##########
File path: cpp/src/arrow/testing/random.h
##########
@@ -322,6 +322,16 @@ class ARROW_TESTING_EXPORT RandomArrayGenerator {
   std::shared_ptr<Array> FixedSizeBinary(int64_t size, int32_t byte_width,
                                          double null_probability = 0);
 
+  // /// \brief Generate a random StructArray

Review comment:
       Why the doubled comment+docstring prefix?

##########
File path: cpp/src/arrow/adapters/orc/adapter.cc
##########
@@ -628,41 +733,86 @@ class ArrowOutputStream : public liborc::OutputStream {
   int64_t length_;
 };
 
+Result<liborc::WriterOptions> MakeOrcWriterOptions(
+    arrow::adapters::orc::WriteOptions options) {
+  liborc::WriterOptions orc_options_;
+  orc_options_.setFileVersion(
+      liborc::FileVersion(static_cast<uint32_t>(options.file_version.major()),
+                          
static_cast<uint32_t>(options.file_version.minor())));
+  orc_options_.setStripeSize(static_cast<uint64_t>(options.stripe_size));
+  orc_options_.setCompressionBlockSize(
+      static_cast<uint64_t>(options.compression_block_size));
+  orc_options_.setCompressionStrategy(static_cast<liborc::CompressionStrategy>(
+      static_cast<int8_t>(options.compression_strategy)));
+  
orc_options_.setRowIndexStride(static_cast<uint64_t>(options.row_index_stride));
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  
orc_options_.setDictionaryKeySizeThreshold(options.dictionary_key_size_threshold);
+  orc_options_.setPaddingTolerance(options.padding_tolerance);
+  std::set<uint64_t> orc_bloom_filter_columns_;

Review comment:
       ```suggestion
     std::set<uint64_t> orc_bloom_filter_columns;
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton

Review comment:
       Is this comment informative here?

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))
+
+    # file_version
+
+    if file_version is not None:
+        if str(file_version) == "0.12":
+            deref(options).file_version = FileVersion(0, 12)
+        elif str(file_version) == "0.11":
+            deref(options).file_version = FileVersion(0, 11)
+        else:
+            raise ValueError("Unsupported ORC file version: {0}"
+                             .format(file_version))
+
+    # stripe_size
+
+    if stripe_size is not None:
+        if isinstance(stripe_size, int) and stripe_size > 0:
+            deref(options).stripe_size = stripe_size
+        else:
+            raise ValueError("Invalid ORC stripe size: {0}"
+                             .format(stripe_size))
+
+    # compression
+
+    if compression is not None:
+        if isinstance(compression, basestring):
+            deref(options).compression = compression_type_from_name(
+                compression)
+        else:
+            raise TypeError("Unsupported ORC compression type: {0}"
+                            .format(compression))
+
+    # compression_block_size
+
+    if compression_block_size is not None:
+        if (isinstance(compression_block_size, int) and
+                compression_block_size > 0):
+            deref(options).compression_block_size = compression_block_size
+        else:
+            raise ValueError("Invalid ORC compression block size: {0}"
+                             .format(compression_block_size))
+
+    # compression_strategy
+
+    if compression_strategy is not None:
+        if isinstance(compression, basestring):

Review comment:
       ```suggestion
           if isinstance(compression, str):
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))
+
+    # file_version
+
+    if file_version is not None:
+        if str(file_version) == "0.12":
+            deref(options).file_version = FileVersion(0, 12)
+        elif str(file_version) == "0.11":
+            deref(options).file_version = FileVersion(0, 11)
+        else:
+            raise ValueError("Unsupported ORC file version: {0}"
+                             .format(file_version))
+
+    # stripe_size
+
+    if stripe_size is not None:
+        if isinstance(stripe_size, int) and stripe_size > 0:
+            deref(options).stripe_size = stripe_size
+        else:
+            raise ValueError("Invalid ORC stripe size: {0}"
+                             .format(stripe_size))
+
+    # compression
+
+    if compression is not None:
+        if isinstance(compression, basestring):
+            deref(options).compression = compression_type_from_name(
+                compression)
+        else:
+            raise TypeError("Unsupported ORC compression type: {0}"
+                            .format(compression))
+
+    # compression_block_size
+
+    if compression_block_size is not None:
+        if (isinstance(compression_block_size, int) and
+                compression_block_size > 0):
+            deref(options).compression_block_size = compression_block_size
+        else:
+            raise ValueError("Invalid ORC compression block size: {0}"
+                             .format(compression_block_size))
+
+    # compression_strategy
+
+    if compression_strategy is not None:
+        if isinstance(compression, basestring):
+            deref(options).compression_strategy = \
+                compression_strategy_from_name(compression_strategy)
+        else:
+            raise TypeError("Unsupported ORC compression strategy: {0}"
+                            .format(compression_strategy))
+
+    # row_index_stride
+
+    if row_index_stride is not None:
+        if isinstance(row_index_stride, int) and row_index_stride > 0:
+            deref(options).row_index_stride = row_index_stride
+        else:
+            raise ValueError("Invalid ORC row index stride: {0}"
+                             .format(row_index_stride))
+
+    # padding_tolerance
+
+    if padding_tolerance is not None:
+        try:
+            padding_tolerance = float(padding_tolerance)
+            deref(options).padding_tolerance = padding_tolerance
+        except Exception:
+            raise ValueError("Invalid ORC padding tolerance: {0}"
+                             .format(padding_tolerance))
+
+    # dictionary_key_size_threshold
+
+    if dictionary_key_size_threshold is not None:
+        try:
+            dictionary_key_size_threshold = float(
+                dictionary_key_size_threshold)
+            deref(options).dictionary_key_size_threshold = \
+                dictionary_key_size_threshold
+        except Exception:
+            raise ValueError("Invalid ORC dictionary key size threshold: {0}"
+                             .format(dictionary_key_size_threshold))
+
+    # bloom_filter_columns
+
+    if bloom_filter_columns is not None:
+        try:
+            bloom_filter_columns = set(bloom_filter_columns)

Review comment:
       Cast to `list` instead?

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZ0', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.
+compression_strategy : string, default 'speed'
+    Specify the compression strategy i.e. speed vs size reduction.
+    Valid values: {'SPEED', 'COMPRESSION'}
+row_index_stride : int, default 10000
+    Specify the row index stride i.e. the number of rows per
+    an entry in the row index.
+padding_tolerance : double, default 0.0
+    Set the padding tolerance.
+dictionary_key_size_threshold : double, default 0.0
+    Set the dictionary key size threshold. 0 to disable dictionary encoding.
+    1 to always enable dictionary encoding.
+bloom_filter_columns : None, set-like or list-like, default None
+    Set columns that use the bloom filter.
+bloom_filter_fpp: double, default 0.05
+    Set false positive probability of the bloom filter.
+"""
+
+
 class ORCWriter:
-    """
-    Writer interface for a single ORC file
+    __doc__ = """
+Writer interface for a single ORC file
 
-    Parameters
-    ----------
-    where : str or pyarrow.io.NativeFile
-        Writable target. For passing Python file objects or byte buffers,
-        see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
-        or pyarrow.io.FixedSizeBufferWriter.
-    """
+Parameters
+----------
+where : str or pyarrow.io.NativeFile
+    Writable target. For passing Python file objects or byte buffers,
+    see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
+    or pyarrow.io.FixedSizeBufferWriter.
+{}
+""".format(_orc_writer_args_docs)
+
+    def __init__(self, where, file_version='0.12',

Review comment:
       Let's make all options keyword-only to discourage write-only code.
   
   ```suggestion
       def __init__(self, where, *, file_version='0.12',
   ```

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {

Review comment:
       ```suggestion
   enum class WriterVersion : int32_t {
   ```

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy { kSpeed = 0, kCompression };
+
+enum class RleVersion { k1 = 0, k2 = 1 };
+
+enum class BloomFilterVersion {
+  // Include both the BLOOM_FILTER and BLOOM_FILTER_UTF8 streams to support
+  // both old and new readers.
+  kOriginal = 0,
+  // Only include the BLOOM_FILTER_UTF8 streams that consistently use UTF8.
+  // See ORC-101
+  kUtf8 = 1,
+  kFuture = INT32_MAX
+};
+
+class ARROW_EXPORT FileVersion {
+ private:
+  int32_t major_version;
+  int32_t minor_version;
+
+ public:
+  static const FileVersion& v_0_11();
+  static const FileVersion& v_0_12();
+
+  FileVersion(int32_t major, int32_t minor)
+      : major_version(major), minor_version(minor) {}
+
+  /**
+   * Get major version
+   */
+  int32_t major() const { return this->major_version; }
+
+  /**
+   * Get minor version
+   */
+  int32_t minor() const { return this->minor_version; }
+
+  bool operator==(const FileVersion& right) const {
+    return this->major_version == right.major() && this->minor_version == 
right.minor();
+  }
+
+  bool operator!=(const FileVersion& right) const { return !(*this == right); }
+
+  std::string ToString() const {
+    std::stringstream ss;
+    ss << major() << '.' << minor();
+    return ss.str();
+  }
+};
+
+/// Options for the ORC Writer
+struct ARROW_EXPORT WriteOptions {
+  /// Number of rows the ORC writer writes at a time, default 1024
+  int64_t batch_size = 1024;
+  /// Which ORC file version to use, default FileVersion(0, 12)
+  FileVersion file_version = FileVersion(0, 12);
+  /// Size of each ORC stripe, default 67108864
+  int64_t stripe_size = 67108864;
+  /// The compression codec of the ORC file, default Compression::GZIP
+  Compression::type compression = Compression::GZIP;
+  /// The size of each compression block, default 65536
+  int64_t compression_block_size = 65536;
+  /// The compression strategy i.e. speed vs size reduction, default
+  /// CompressionStrategy::kSpeed
+  CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
+  /// The number of rows per an entry in the row index, default 10000
+  int64_t row_index_stride = 10000;
+  /// The padding tolerance, default 0.0
+  double padding_tolerance = 0.0;
+  /// The dictionary key size threshold. 0 to disable dictionary encoding.
+  /// 1 to always enable dictionary encoding, default 0.0
+  double dictionary_key_size_threshold = 0.0;
+  /// The set of columns that use the bloom filter, default empty
+  std::set<int64_t> bloom_filter_columns;

Review comment:
       You could make this a `std::vector`; it will probably be much cheaper, and 
you can then remove the `<set>` inclusion.

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy { kSpeed = 0, kCompression };
+
+enum class RleVersion { k1 = 0, k2 = 1 };

Review comment:
       Is this used anywhere?

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {

Review comment:
       ```suggestion
   enum class WriterId : int32_t {
   ```

##########
File path: python/pyarrow/tests/test_orc.py
##########
@@ -190,6 +195,271 @@ def test_orcfile_readwrite():
     orc_file = orc.ORCFile(buffer_reader)
     output_table = orc_file.read()
     assert table.equals(output_table)
+    # Check for default WriteOptions
+    assert orc_file.compression == 'ZLIB'
+    assert orc_file.file_version == '0.12'
+    assert orc_file.row_index_stride == 10000
+    assert orc_file.compression_size == 65536
+
+
+def test_orcfile_readwrite_with_writeoptions():
+    from pyarrow import orc
+
+    buffer_output_stream = pa.BufferOutputStream()
+    a = pa.array([1, None, 3, None])
+    b = pa.array([None, "Arrow", None, "ORC"])
+    table = pa.table({"int64": a, "utf8": b})
+    orc.write_table(
+        table,
+        buffer_output_stream,
+        compression='snappy',
+        file_version='0.11',
+        row_index_stride=5000,
+        compression_block_size=32768,
+    )
+    buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
+    orc_file = orc.ORCFile(buffer_reader)
+    output_table = orc_file.read()
+    assert table.equals(output_table)
+    # Check for default WriteOptions

Review comment:
       Not "default" here — these WriteOptions were set explicitly, so the comment should not say "default".

##########
File path: cpp/src/arrow/adapters/orc/adapter.cc
##########
@@ -628,41 +733,86 @@ class ArrowOutputStream : public liborc::OutputStream {
   int64_t length_;
 };
 
+Result<liborc::WriterOptions> MakeOrcWriterOptions(
+    arrow::adapters::orc::WriteOptions options) {
+  liborc::WriterOptions orc_options_;

Review comment:
       ```suggestion
     liborc::WriterOptions orc_options;
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',

Review comment:
       `pyarrow.parquet` uses "GZIP", is there a reason to expose this as 
"ZLIB" for Orc instead?

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy { kSpeed = 0, kCompression };
+
+enum class RleVersion { k1 = 0, k2 = 1 };
+
+enum class BloomFilterVersion {

Review comment:
       Is this used anywhere?

##########
File path: cpp/src/arrow/adapters/orc/adapter_test.cc
##########
@@ -226,19 +224,54 @@ std::shared_ptr<Table> GenerateRandomTable(const 
std::shared_ptr<Schema>& schema
   return Table::Make(schema, cv);
 }
 
+arrow::adapters::orc::WriteOptions GenerateRandomWriteOptions(int64_t 
num_cols) {
+  auto arrow_write_options = arrow::adapters::orc::WriteOptions();
+  arrow_write_options.batch_size = arrow::random_single_int<int64_t, 
int64_t>(4ull, 8ull);
+  arrow_write_options.file_version = arrow::adapters::orc::FileVersion(
+      0, arrow::random_single_int<int32_t, int32_t>(11, 12));
+  arrow_write_options.stripe_size =
+      arrow::random_single_int<int64_t, int64_t>(4ull, 128ull);
+  arrow_write_options.compression_block_size =
+      arrow::random_single_int<int64_t, int64_t>(4ull, 128ull);
+  arrow_write_options.row_index_stride =
+      arrow::random_single_int<int64_t, int64_t>(0, 128ull);
+  arrow_write_options.compression =
+      arrow::random_single_int<int8_t, arrow::Compression::type>(0, 2);
+  arrow_write_options.compression_strategy =
+      arrow::random_single_int<int8_t, 
arrow::adapters::orc::CompressionStrategy>(0, 1);
+  arrow_write_options.padding_tolerance = arrow::random_single_real<double, 
double>(0, 1);
+  arrow_write_options.dictionary_key_size_threshold =
+      arrow::random_single_real<double, double>(0, 1);
+  arrow_write_options.bloom_filter_fpp = arrow::random_single_real<double, 
double>(0, 1);
+  std::set<int64_t> bloom_filter_cols;
+  for (int64_t i = 0; i < num_cols; i++) {
+    if (arrow::random_single_int<int8_t, int8_t>(0, 1) == 1) {
+      bloom_filter_cols.insert(i);
+    }
+  }
+  arrow_write_options.bloom_filter_columns = bloom_filter_cols;
+  return arrow_write_options;
+}
+
 void AssertTableWriteReadEqual(const std::shared_ptr<Table>& input_table,
                                const std::shared_ptr<Table>& 
expected_output_table,
                                const int64_t max_size = 
kDefaultSmallMemStreamSize) {
   EXPECT_OK_AND_ASSIGN(auto buffer_output_stream,
                        io::BufferOutputStream::Create(max_size));
-  EXPECT_OK_AND_ASSIGN(auto writer,
-                       
adapters::orc::ORCFileWriter::Open(buffer_output_stream.get()));
+  arrow::adapters::orc::WriteOptions write_options =
+      GenerateRandomWriteOptions(input_table->num_columns());

Review comment:
       I'm not sure I understand the case for testing random write options, but 
only once (if you get a sporadic failure you'll be hard-pressed to reproduce 
it). Why not test with well-known values instead (two or three different sets 
of them, for example)?

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy { kSpeed = 0, kCompression };
+
+enum class RleVersion { k1 = 0, k2 = 1 };
+
+enum class BloomFilterVersion {
+  // Include both the BLOOM_FILTER and BLOOM_FILTER_UTF8 streams to support
+  // both old and new readers.
+  kOriginal = 0,
+  // Only include the BLOOM_FILTER_UTF8 streams that consistently use UTF8.
+  // See ORC-101
+  kUtf8 = 1,
+  kFuture = INT32_MAX
+};
+
+class ARROW_EXPORT FileVersion {
+ private:
+  int32_t major_version;
+  int32_t minor_version;
+
+ public:
+  static const FileVersion& v_0_11();
+  static const FileVersion& v_0_12();
+
+  FileVersion(int32_t major, int32_t minor)
+      : major_version(major), minor_version(minor) {}
+
+  /**
+   * Get major version
+   */
+  int32_t major() const { return this->major_version; }
+
+  /**
+   * Get minor version
+   */
+  int32_t minor() const { return this->minor_version; }
+
+  bool operator==(const FileVersion& right) const {
+    return this->major_version == right.major() && this->minor_version == 
right.minor();
+  }
+
+  bool operator!=(const FileVersion& right) const { return !(*this == right); }
+
+  std::string ToString() const {

Review comment:
       You can move the definition to the `.cc` file and remove the `<sstream>` 
inclusion.

##########
File path: cpp/src/arrow/testing/random.h
##########
@@ -461,6 +471,29 @@ ARROW_TESTING_EXPORT
 void rand_month_day_nanos(int64_t N,
                           
std::vector<MonthDayNanoIntervalType::MonthDayNanos>* out);
 
+/// \brief Generate a single integer, enum or enum class element within a 
given range
+///
+/// If enum or enum class is used the int value will be used
+///
+/// \param[in] lower the lower bound of the range
+/// \param[in] upper the upper bound of the range
+/// \return a generated integer, enum or enum class element
+template <typename T, typename U>
+U random_single_int(T lower, T upper) {
+  const int random_seed = 0;
+  std::default_random_engine gen(random_seed);

Review comment:
       This will essentially always generate the same output for a given pair 
of inputs...
   Can you just use `<random>` directly from your tests indeed?

##########
File path: cpp/src/arrow/adapters/orc/adapter_test.cc
##########
@@ -226,19 +224,54 @@ std::shared_ptr<Table> GenerateRandomTable(const 
std::shared_ptr<Schema>& schema
   return Table::Make(schema, cv);
 }
 
+arrow::adapters::orc::WriteOptions GenerateRandomWriteOptions(int64_t 
num_cols) {
+  auto arrow_write_options = arrow::adapters::orc::WriteOptions();
+  arrow_write_options.batch_size = arrow::random_single_int<int64_t, 
int64_t>(4ull, 8ull);
+  arrow_write_options.file_version = arrow::adapters::orc::FileVersion(
+      0, arrow::random_single_int<int32_t, int32_t>(11, 12));
+  arrow_write_options.stripe_size =
+      arrow::random_single_int<int64_t, int64_t>(4ull, 128ull);
+  arrow_write_options.compression_block_size =
+      arrow::random_single_int<int64_t, int64_t>(4ull, 128ull);
+  arrow_write_options.row_index_stride =
+      arrow::random_single_int<int64_t, int64_t>(0, 128ull);
+  arrow_write_options.compression =
+      arrow::random_single_int<int8_t, arrow::Compression::type>(0, 2);

Review comment:
       Not all compression kinds may be enabled, so you'd need to skip the test 
selectively if the given compression type isn't enabled.

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')

Review comment:
       Same here: raise an error?

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))

Review comment:
       ```suggestion
       raise ValueError(f'Unknown CompressionStrategy: {name}')
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')

Review comment:
       Instead of returning "UNKNOWN", it would be better to raise an error 
instead (probably a `ValueError`).

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))

Review comment:
       We can use f-strings in PyArrow now.
   
   ```suggestion
       raise ValueError(f'Unknown CompressionKind: {name}')
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))
+
+    # file_version
+
+    if file_version is not None:
+        if str(file_version) == "0.12":

Review comment:
       Why cast to `str`? It should already be a `str`, no?

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))

Review comment:
       ```suggestion
               raise ValueError(f"Invalid ORC writer batch size: {batch_size}")
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+

Review comment:
       Nit: no need to skip so many newlines.

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
cdef compression_type_from_enum(CCompressionType compression_type_):
    """Map an Arrow compression enum value to the ORC codec name."""
    if compression_type_ == CCompressionType_UNCOMPRESSED:
        return 'UNCOMPRESSED'
    if compression_type_ == CCompressionType_GZIP:
        # Arrow calls this codec GZIP; ORC names the same codec ZLIB.
        return 'ZLIB'
    if compression_type_ == CCompressionType_SNAPPY:
        return 'SNAPPY'
    if compression_type_ == CCompressionType_LZ4:
        return 'LZ4'
    if compression_type_ == CCompressionType_ZSTD:
        return 'ZSTD'
    return 'UNKNOWN'
+
+
cdef CCompressionType compression_type_from_name(name) except *:
    """Translate an ORC codec name into Arrow's CCompressionType.

    Raises TypeError for non-string input and ValueError for an
    unrecognized codec name.
    """
    if not isinstance(name, str):
        raise TypeError('compression must be a string')
    name = name.upper()
    if name == 'UNCOMPRESSED':
        return CCompressionType_UNCOMPRESSED
    if name == 'ZLIB':
        # ORC's ZLIB corresponds to Arrow's GZIP codec.
        return CCompressionType_GZIP
    if name == 'SNAPPY':
        return CCompressionType_SNAPPY
    if name == 'LZ4':
        return CCompressionType_LZ4
    if name == 'ZSTD':
        return CCompressionType_ZSTD
    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
    """Stringify an ORC compression strategy enum value."""
    if compression_strategy_ == _CompressionStrategy_SPEED:
        return 'SPEED'
    if compression_strategy_ == _CompressionStrategy_COMPRESSION:
        return 'COMPRESSION'
    return 'UNKNOWN'
+
+
cdef CompressionStrategy compression_strategy_from_name(name) except *:
    """Translate a strategy name ('SPEED' or 'COMPRESSION') to the ORC enum.

    Raises TypeError for non-string input and ValueError for an
    unrecognized strategy name.
    """
    if not isinstance(name, str):
        raise TypeError('compression strategy must be a string')
    name = name.upper()
    if name == 'SPEED':
        # SPEED is the default value in the ORC C++ implementation
        return _CompressionStrategy_SPEED
    if name == 'COMPRESSION':
        return _CompressionStrategy_COMPRESSION
    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
cdef rle_version_from_enum(RleVersion rle_version_):
    """Stringify an ORC run-length-encoding version enum value."""
    if rle_version_ == _RleVersion_1:
        return '1'
    if rle_version_ == _RleVersion_2:
        return '2'
    return 'UNKNOWN'
+
+
cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
    """Stringify an ORC bloom filter version enum value."""
    if bloom_filter_version_ == _BloomFilterVersion_ORIGINAL:
        return 'ORIGINAL'
    if bloom_filter_version_ == _BloomFilterVersion_UTF8:
        return 'UTF8'
    if bloom_filter_version_ == _BloomFilterVersion_FUTURE:
        return 'FUTURE'
    return 'UNKNOWN'
+
+
cdef file_version_from_class(FileVersion file_version_):
    """Return the file version rendered by FileVersion.ToString() as str."""
    return frombytes(file_version_.ToString())
+
+
cdef writer_id_from_enum(WriterId writer_id_):
    """Map an ORC writer id enum value to the producing writer's name."""
    if writer_id_ == _WriterId_ORC_JAVA_WRITER:
        return 'ORC_JAVA'
    if writer_id_ == _WriterId_ORC_CPP_WRITER:
        return 'ORC_CPP'
    if writer_id_ == _WriterId_PRESTO_WRITER:
        return 'PRESTO'
    if writer_id_ == _WriterId_SCRITCHLEY_GO:
        return 'SCRITCHLEY_GO'
    if writer_id_ == _WriterId_TRINO_WRITER:
        return 'TRINO'
    return 'UNKNOWN'
+
+
cdef writer_version_from_enum(WriterVersion writer_version_):
    """Map an ORC writer version enum value to its symbolic name."""
    for value, label in (
        (_WriterVersion_ORIGINAL, 'ORIGINAL'),
        (_WriterVersion_HIVE_8732, 'HIVE_8732'),
        (_WriterVersion_HIVE_4243, 'HIVE_4243'),
        (_WriterVersion_HIVE_12055, 'HIVE_12055'),
        (_WriterVersion_HIVE_13083, 'HIVE_13083'),
        (_WriterVersion_ORC_101, 'ORC_101'),
        (_WriterVersion_ORC_135, 'ORC_135'),
        (_WriterVersion_ORC_517, 'ORC_517'),
        (_WriterVersion_ORC_203, 'ORC_203'),
        (_WriterVersion_ORC_14, 'ORC_14'),
    ):
        if writer_version_ == value:
            return label
    return 'UNKNOWN'
+
+
cdef shared_ptr[WriteOptions] _create_write_options(
    file_version=None,
    batch_size=None,
    stripe_size=None,
    compression=None,
    compression_block_size=None,
    compression_strategy=None,
    row_index_stride=None,
    padding_tolerance=None,
    dictionary_key_size_threshold=None,
    bloom_filter_columns=None,
    bloom_filter_fpp=None
) except *:
    """Validate user-supplied writer options and build a WriteOptions.

    Every argument is optional; ``None`` leaves the C++ default in place.
    Raises ValueError or TypeError on invalid values.
    """
    cdef:
        shared_ptr[WriteOptions] options

    options = make_shared[WriteOptions]()

    # batch_size: must be a positive integer
    if batch_size is not None:
        if isinstance(batch_size, int) and batch_size > 0:
            deref(options).batch_size = batch_size
        else:
            raise ValueError("Invalid ORC writer batch size: {0}"
                             .format(batch_size))

    # file_version: only ORC v0 (Hive 0.11) and v1 (Hive 0.12) exist
    if file_version is not None:
        if str(file_version) == "0.12":
            deref(options).file_version = FileVersion(0, 12)
        elif str(file_version) == "0.11":
            deref(options).file_version = FileVersion(0, 11)
        else:
            raise ValueError("Unsupported ORC file version: {0}"
                             .format(file_version))

    # stripe_size: must be a positive integer (bytes)
    if stripe_size is not None:
        if isinstance(stripe_size, int) and stripe_size > 0:
            deref(options).stripe_size = stripe_size
        else:
            raise ValueError("Invalid ORC stripe size: {0}"
                             .format(stripe_size))

    # compression: codec name, validated by compression_type_from_name
    if compression is not None:
        if isinstance(compression, str):
            deref(options).compression = compression_type_from_name(
                compression)
        else:
            raise TypeError("Unsupported ORC compression type: {0}"
                            .format(compression))

    # compression_block_size: must be a positive integer (bytes)
    if compression_block_size is not None:
        if (isinstance(compression_block_size, int) and
                compression_block_size > 0):
            deref(options).compression_block_size = compression_block_size
        else:
            raise ValueError("Invalid ORC compression block size: {0}"
                             .format(compression_block_size))

    # compression_strategy: test the strategy argument itself
    # (the previous code mistakenly type-checked `compression` here)
    if compression_strategy is not None:
        if isinstance(compression_strategy, str):
            deref(options).compression_strategy = \
                compression_strategy_from_name(compression_strategy)
        else:
            raise TypeError("Unsupported ORC compression strategy: {0}"
                            .format(compression_strategy))

    # row_index_stride: must be a positive integer
    if row_index_stride is not None:
        if isinstance(row_index_stride, int) and row_index_stride > 0:
            deref(options).row_index_stride = row_index_stride
        else:
            raise ValueError("Invalid ORC row index stride: {0}"
                             .format(row_index_stride))

    # padding_tolerance: any float-convertible value
    if padding_tolerance is not None:
        try:
            padding_tolerance = float(padding_tolerance)
            deref(options).padding_tolerance = padding_tolerance
        except Exception:
            raise ValueError("Invalid ORC padding tolerance: {0}"
                             .format(padding_tolerance))

    # dictionary_key_size_threshold: a ratio, so it must lie in [0, 1]
    if dictionary_key_size_threshold is not None:
        try:
            dictionary_key_size_threshold = float(
                dictionary_key_size_threshold)
            assert 0 <= dictionary_key_size_threshold <= 1
            deref(options).dictionary_key_size_threshold = \
                dictionary_key_size_threshold
        except Exception:
            raise ValueError("Invalid ORC dictionary key size threshold: {0}"
                             .format(dictionary_key_size_threshold))

    # bloom_filter_columns: non-negative column indices
    if bloom_filter_columns is not None:
        try:
            bloom_filter_columns = set(bloom_filter_columns)
            for col in bloom_filter_columns:
                assert isinstance(col, int) and col >= 0
            deref(options).bloom_filter_columns = bloom_filter_columns
        except Exception:
            raise ValueError("Invalid ORC BloomFilter columns: {0}"
                             .format(bloom_filter_columns))

    # bloom_filter_fpp: allowed false positive probability, in [0, 1]
    if bloom_filter_fpp is not None:
        try:
            bloom_filter_fpp = float(bloom_filter_fpp)
            assert 0 <= bloom_filter_fpp <= 1

Review comment:
       ```suggestion
               assert 0 <= bloom_filter_fpp <= 1
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementation
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))
+
+    # file_version
+
+    if file_version is not None:
+        if str(file_version) == "0.12":
+            deref(options).file_version = FileVersion(0, 12)
+        elif str(file_version) == "0.11":
+            deref(options).file_version = FileVersion(0, 11)
+        else:
+            raise ValueError("Unsupported ORC file version: {0}"
+                             .format(file_version))
+
+    # stripe_size
+
+    if stripe_size is not None:
+        if isinstance(stripe_size, int) and stripe_size > 0:
+            deref(options).stripe_size = stripe_size
+        else:
+            raise ValueError("Invalid ORC stripe size: {0}"
+                             .format(stripe_size))
+
+    # compression
+
+    if compression is not None:
+        if isinstance(compression, basestring):

Review comment:
       ```suggestion
           if isinstance(compression, str):
   ```

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
/// Known producers of ORC files, as recorded in the file metadata.
enum class WriterId {
  kOrcJava = 0,
  kOrcCpp = 1,
  kPresto = 2,
  kScritchleyGo = 3,
  kTrino = 4,
  kUnknown = INT32_MAX
};

/// Writer version codes; the names suggest each non-original value maps to
/// an upstream ORC/Hive fix (e.g. HIVE-8732) — see the ORC specification.
enum class WriterVersion {
  kOriginal = 0,
  kHive8732 = 1,
  kHive4243 = 2,
  kHive12055 = 3,
  kHive13083 = 4,
  kOrc101 = 5,
  kOrc135 = 6,
  kOrc517 = 7,
  kOrc203 = 8,
  kOrc14 = 9,
  kMax = INT32_MAX
};

/// Trade-off hint for the compression codec: favor speed or size reduction.
enum class CompressionStrategy { kSpeed = 0, kCompression };

/// Run-length encoding version.
enum class RleVersion { k1 = 0, k2 = 1 };

enum class BloomFilterVersion {
  // Include both the BLOOM_FILTER and BLOOM_FILTER_UTF8 streams to support
  // both old and new readers.
  kOriginal = 0,
  // Only include the BLOOM_FILTER_UTF8 streams that consistently use UTF8.
  // See ORC-101
  kUtf8 = 1,
  kFuture = INT32_MAX
};
+
+class ARROW_EXPORT FileVersion {
+ private:
+  int32_t major_version;
+  int32_t minor_version;
+
+ public:
+  static const FileVersion& v_0_11();
+  static const FileVersion& v_0_12();
+
+  FileVersion(int32_t major, int32_t minor)
+      : major_version(major), minor_version(minor) {}
+
+  /**
+   * Get major version
+   */
+  int32_t major() const { return this->major_version; }
+
+  /**
+   * Get minor version
+   */
+  int32_t minor() const { return this->minor_version; }
+
+  bool operator==(const FileVersion& right) const {
+    return this->major_version == right.major() && this->minor_version == 
right.minor();
+  }
+
+  bool operator!=(const FileVersion& right) const { return !(*this == right); }
+
+  std::string ToString() const {
+    std::stringstream ss;
+    ss << major() << '.' << minor();
+    return ss.str();
+  }
+};
+
+/// Options for the ORC Writer
+struct ARROW_EXPORT WriteOptions {
+  /// Number of rows the ORC writer writes at a time, default 1024
+  int64_t batch_size = 1024;
+  /// Which ORC file version to use, default FileVersion(0, 12)
+  FileVersion file_version = FileVersion(0, 12);
+  /// Size of each ORC stripe, default 67108864
+  int64_t stripe_size = 67108864;
+  /// The compression codec of the ORC file, default Compression::GZIP
+  Compression::type compression = Compression::GZIP;
+  /// The size of each compression block, default 65536
+  int64_t compression_block_size = 65536;
+  /// The compression strategy i.e. speed vs size reduction, default
+  /// CompressionStrategy::kSpeed
+  CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
+  /// The number of rows per an entry in the row index, default 10000
+  int64_t row_index_stride = 10000;
+  /// The padding tolerance, default 0.0
+  double padding_tolerance = 0.0;
+  /// The dictionary key size threshold. 0 to disable dictionary encoding.
+  /// 1 to always enable dictionary encoding, default 0.0
+  double dictionary_key_size_threshold = 0.0;
+  /// The set of columns that use the bloom filter, default empty
+  std::set<int64_t> bloom_filter_columns;
+  /// False positive probability of the bloom filter, default 0.05

Review comment:
       I'm not sure I understand this, is it the _allowed_ false positive 
probability?

##########
File path: python/pyarrow/_orc.pxd
##########
@@ -54,9 +111,40 @@ cdef extern from "arrow/adapters/orc/adapter.h" \
 
         int64_t NumberOfRows()
 
+        FileVersion GetFileVersion()
+

Review comment:
       Nit: no need to skip lines after every C++ method declaration.

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.

Review comment:
       Beware the reST syntax for hyperlinks: 
https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#embedded-uris-and-aliases
   
   ```suggestion
       version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
       while Hive 0.12 / ORC v1 is the newer one as defined
       `here <https://orc.apache.org/specification/ORCv1/>`_.
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -100,6 +326,53 @@ cdef class ORCReader(_Weakrefable):
     def nstripes(self):
         return deref(self.reader).NumberOfStripes()
 
    def file_version(self):
        """Return the ORC file format version as a string, e.g. '0.12'."""
        return file_version_from_class(deref(self.reader).GetFileVersion())

    def software_version(self):
        """Return the version string of the software that wrote the file."""
        return frombytes(deref(self.reader).GetSoftwareVersion())

    def compression(self):
        """Return the file's compression codec name ('ZLIB', 'SNAPPY', ...)."""
        return compression_type_from_enum(
            GetResultValue(deref(self.reader).GetCompression()))

    def compression_size(self):
        # Value reported by the reader's GetCompressionSize(); presumably
        # the compression block size in bytes.
        return deref(self.reader).GetCompressionSize()

    def row_index_stride(self):
        # Number of rows per row-index entry, per GetRowIndexStride().
        return deref(self.reader).GetRowIndexStride()
+
+    def writer(self):
+        writer_name = writer_id_from_enum(deref(self.reader).GetWriterId())
+        if writer_name == 'UNKNOWN':
+            return deref(self.reader).GetWriterIdValue()
+        else:
+            return writer_name
+
    def writer_version(self):
        """Return the symbolic writer version name (see writer_version_from_enum)."""
        return writer_version_from_enum(deref(self.reader).GetWriterVersion())

    def nstripe_statistics(self):
        # Count reported by GetNumberOfStripeStatistics().
        return deref(self.reader).GetNumberOfStripeStatistics()

    def content_length(self):
        # Length reported by GetContentLength(); presumably bytes.
        return deref(self.reader).GetContentLength()

    def stripe_statistics_length(self):
        # Length reported by GetStripeStatisticsLength(); presumably bytes.
        return deref(self.reader).GetStripeStatisticsLength()

    def file_footer_length(self):
        # Length reported by GetFileFooterLength(); presumably bytes.
        return deref(self.reader).GetFileFooterLength()

    def file_postscript_length(self):
        # Length reported by GetFilePostscriptLength(); presumably bytes.
        return deref(self.reader).GetFilePostscriptLength()

    def file_length(self):
        # Total file length reported by GetFileLength(); presumably bytes.
        return deref(self.reader).GetFileLength()
+
+    def serialized_file_tail(self):
+        return frombytes(deref(self.reader).GetSerializedFileTail())

Review comment:
       If this is arbitrary binary data, it shouldn't be decoded as utf8, 
should it?

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -138,18 +411,46 @@ cdef class ORCReader(_Weakrefable):
 
         return pyarrow_wrap_table(sp_table)
 
+
 cdef class ORCWriter(_Weakrefable):
     cdef:
-        object source
+        object sink
         unique_ptr[ORCFileWriter] writer
         shared_ptr[COutputStream] rd_handle
 
-    def open(self, object source):
-        self.source = source
-        get_writer(source, &self.rd_handle)
+    def open(self, object sink, file_version=None,

Review comment:
       ```suggestion
       def open(self, object sink, *, file_version=None,
   ```

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementaton
+    # (sic: "implementation")
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))
+
+    # file_version
+
+    if file_version is not None:
+        if str(file_version) == "0.12":
+            deref(options).file_version = FileVersion(0, 12)
+        elif str(file_version) == "0.11":
+            deref(options).file_version = FileVersion(0, 11)
+        else:
+            raise ValueError("Unsupported ORC file version: {0}"
+                             .format(file_version))
+
+    # stripe_size
+
+    if stripe_size is not None:
+        if isinstance(stripe_size, int) and stripe_size > 0:
+            deref(options).stripe_size = stripe_size
+        else:
+            raise ValueError("Invalid ORC stripe size: {0}"
+                             .format(stripe_size))
+
+    # compression
+
+    if compression is not None:
+        if isinstance(compression, basestring):
+            deref(options).compression = compression_type_from_name(
+                compression)
+        else:
+            raise TypeError("Unsupported ORC compression type: {0}"
+                            .format(compression))
+
+    # compression_block_size
+
+    if compression_block_size is not None:
+        if (isinstance(compression_block_size, int) and
+                compression_block_size > 0):
+            deref(options).compression_block_size = compression_block_size
+        else:
+            raise ValueError("Invalid ORC compression block size: {0}"
+                             .format(compression_block_size))
+
+    # compression_strategy
+
+    if compression_strategy is not None:
+        if isinstance(compression, basestring):
+            deref(options).compression_strategy = \
+                compression_strategy_from_name(compression_strategy)
+        else:
+            raise TypeError("Unsupported ORC compression strategy: {0}"
+                            .format(compression_strategy))
+
+    # row_index_stride
+
+    if row_index_stride is not None:
+        if isinstance(row_index_stride, int) and row_index_stride > 0:
+            deref(options).row_index_stride = row_index_stride
+        else:
+            raise ValueError("Invalid ORC row index stride: {0}"
+                             .format(row_index_stride))
+
+    # padding_tolerance
+
+    if padding_tolerance is not None:
+        try:
+            padding_tolerance = float(padding_tolerance)
+            deref(options).padding_tolerance = padding_tolerance
+        except Exception:
+            raise ValueError("Invalid ORC padding tolerance: {0}"
+                             .format(padding_tolerance))
+
+    # dictionary_key_size_threshold
+
+    if dictionary_key_size_threshold is not None:
+        try:
+            dictionary_key_size_threshold = float(
+                dictionary_key_size_threshold)
+            deref(options).dictionary_key_size_threshold = \
+                dictionary_key_size_threshold

Review comment:
       Should you check the value is in [0, 1] here?

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.

Review comment:
       ```suggestion
       The size of each compression block.
   ```

##########
File path: cpp/src/arrow/adapters/orc/options.h
##########
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <set>
+#include <sstream>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy { kSpeed = 0, kCompression };
+
+enum class RleVersion { k1 = 0, k2 = 1 };
+
+enum class BloomFilterVersion {
+  // Include both the BLOOM_FILTER and BLOOM_FILTER_UTF8 streams to support
+  // both old and new readers.
+  kOriginal = 0,
+  // Only include the BLOOM_FILTER_UTF8 streams that consistently use UTF8.
+  // See ORC-101
+  kUtf8 = 1,
+  kFuture = INT32_MAX
+};
+
+class ARROW_EXPORT FileVersion {
+ private:
+  int32_t major_version;
+  int32_t minor_version;
+
+ public:
+  static const FileVersion& v_0_11();
+  static const FileVersion& v_0_12();
+
+  FileVersion(int32_t major, int32_t minor)
+      : major_version(major), minor_version(minor) {}
+
+  /**
+   * Get major version
+   */
+  int32_t major() const { return this->major_version; }
+
+  /**
+   * Get minor version
+   */
+  int32_t minor() const { return this->minor_version; }
+
+  bool operator==(const FileVersion& right) const {
+    return this->major_version == right.major() && this->minor_version == 
right.minor();
+  }
+
+  bool operator!=(const FileVersion& right) const { return !(*this == right); }
+
+  std::string ToString() const {
+    std::stringstream ss;
+    ss << major() << '.' << minor();
+    return ss.str();
+  }
+};
+
+/// Options for the ORC Writer
+struct ARROW_EXPORT WriteOptions {
+  /// Number of rows the ORC writer writes at a time, default 1024
+  int64_t batch_size = 1024;
+  /// Which ORC file version to use, default FileVersion(0, 12)
+  FileVersion file_version = FileVersion(0, 12);
+  /// Size of each ORC stripe, default 67108864
+  int64_t stripe_size = 67108864;

Review comment:
       ```suggestion
     /// Size of each ORC stripe, default 64MiB
     int64_t stripe_size = 64 * 1024 * 1024;
   ```

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.

Review comment:
       ```suggestion
       The compression codec.
   ```

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZO', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.
+compression_strategy : string, default 'speed'
+    Specify the compression strategy i.e. speed vs size reduction.
+    Valid values: {'SPEED', 'COMPRESSION'}
+row_index_stride : int, default 10000
+    Specify the row index stride, i.e. the number of rows per
+    entry in the row index.
+padding_tolerance : double, default 0.0
+    Set the padding tolerance.
+dictionary_key_size_threshold : double, default 0.0
+    Set the dictionary key size threshold. 0 to disable dictionary encoding.
+    1 to always enable dictionary encoding.
+bloom_filter_columns : None, set-like or list-like, default None
+    Set columns that use the bloom filter.
+bloom_filter_fpp : double, default 0.05
+    Set false positive probability of the bloom filter.
+"""
+
+
 class ORCWriter:
-    """
-    Writer interface for a single ORC file
+    __doc__ = """
+Writer interface for a single ORC file
 
-    Parameters
-    ----------
-    where : str or pyarrow.io.NativeFile
-        Writable target. For passing Python file objects or byte buffers,
-        see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
-        or pyarrow.io.FixedSizeBufferWriter.
-    """
+Parameters
+----------
+where : str or pyarrow.io.NativeFile
+    Writable target. For passing Python file objects or byte buffers,
+    see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
+    or pyarrow.io.FixedSizeBufferWriter.
+{}
+""".format(_orc_writer_args_docs)
+
+    def __init__(self, where, file_version='0.12',
+                 batch_size=1024,
+                 stripe_size=67108864,
+                 compression='zlib',

Review comment:
       Is it desired to use zlib by default? I would expect either 
uncompressed, or a more modern codec.
   (uncompressed is better as a default, IMHO)

##########
File path: python/pyarrow/orc.py
##########
@@ -143,35 +282,63 @@ def write(self, table):
         table : pyarrow.lib.Table
             The table to be written into the ORC file
         """
+        assert self.is_open
         self.writer.write(table)
 
     def close(self):
         """
         Close the ORC file
         """
-        self.writer.close()
+        if self.is_open:
+            self.writer.close()
+            self.is_open = False
 
 
-def write_table(table, where):
-    """
-    Write a table into an ORC file
-
-    Parameters
-    ----------
-    table : pyarrow.lib.Table
-        The table to be written into the ORC file
-    where : str or pyarrow.io.NativeFile
-        Writable target. For passing Python file objects or byte buffers,
-        see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
-        or pyarrow.io.FixedSizeBufferWriter.
-    """
+def write_table(table, where, file_version='0.12',

Review comment:
       ```suggestion
   def write_table(table, where, *, file_version='0.12',
   ```

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZO', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.
+compression_strategy : string, default 'speed'
+    Specify the compression strategy i.e. speed vs size reduction.
+    Valid values: {'SPEED', 'COMPRESSION'}
+row_index_stride : int, default 10000
+    Specify the row index stride, i.e. the number of rows per
+    entry in the row index.
+padding_tolerance : double, default 0.0
+    Set the padding tolerance.
+dictionary_key_size_threshold : double, default 0.0
+    Set the dictionary key size threshold. 0 to disable dictionary encoding.
+    1 to always enable dictionary encoding.
+bloom_filter_columns : None, set-like or list-like, default None
+    Set columns that use the bloom filter.
+bloom_filter_fpp : double, default 0.05
+    Set false positive probability of the bloom filter.
+"""
+
+
 class ORCWriter:
-    """
-    Writer interface for a single ORC file
+    __doc__ = """
+Writer interface for a single ORC file
 
-    Parameters
-    ----------
-    where : str or pyarrow.io.NativeFile
-        Writable target. For passing Python file objects or byte buffers,
-        see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
-        or pyarrow.io.FixedSizeBufferWriter.
-    """
+Parameters
+----------
+where : str or pyarrow.io.NativeFile
+    Writable target. For passing Python file objects or byte buffers,
+    see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
+    or pyarrow.io.FixedSizeBufferWriter.
+{}
+""".format(_orc_writer_args_docs)
+

Review comment:
       Add this so that the `is_open` attribute is always defined?
   
   ```suggestion
   
       is_open = False
   
   ```

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZO', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.
+compression_strategy : string, default 'speed'
+    Specify the compression strategy i.e. speed vs size reduction.
+    Valid values: {'SPEED', 'COMPRESSION'}
+row_index_stride : int, default 10000
+    Specify the row index stride, i.e. the number of rows per
+    entry in the row index.
+padding_tolerance : double, default 0.0
+    Set the padding tolerance.
+dictionary_key_size_threshold : double, default 0.0
+    Set the dictionary key size threshold. 0 to disable dictionary encoding.
+    1 to always enable dictionary encoding.
+bloom_filter_columns : None, set-like or list-like, default None
+    Set columns that use the bloom filter.
+bloom_filter_fpp : double, default 0.05
+    Set false positive probability of the bloom filter.
+"""
+
+
 class ORCWriter:
-    """
-    Writer interface for a single ORC file
+    __doc__ = """
+Writer interface for a single ORC file
 
-    Parameters
-    ----------
-    where : str or pyarrow.io.NativeFile
-        Writable target. For passing Python file objects or byte buffers,
-        see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
-        or pyarrow.io.FixedSizeBufferWriter.
-    """
+Parameters
+----------
+where : str or pyarrow.io.NativeFile
+    Writable target. For passing Python file objects or byte buffers,
+    see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
+    or pyarrow.io.FixedSizeBufferWriter.
+{}
+""".format(_orc_writer_args_docs)
+
+    def __init__(self, where, file_version='0.12',
+                 batch_size=1024,
+                 stripe_size=67108864,
+                 compression='zlib',
+                 compression_block_size=65536,
+                 compression_strategy='speed',
+                 row_index_stride=10000,
+                 padding_tolerance=0.0,
+                 dictionary_key_size_threshold=0.0,
+                 bloom_filter_columns=None,
+                 bloom_filter_fpp=0.05,
+                 ):
 
-    def __init__(self, where):
         self.writer = _orc.ORCWriter()
-        self.writer.open(where)
+        self.writer.open(
+            where,
+            file_version=file_version,
+            batch_size=batch_size,
+            stripe_size=stripe_size,
+            compression=compression,
+            compression_block_size=compression_block_size,
+            compression_strategy=compression_strategy,
+            row_index_stride=row_index_stride,
+            padding_tolerance=padding_tolerance,
+            dictionary_key_size_threshold=dictionary_key_size_threshold,
+            bloom_filter_columns=bloom_filter_columns,
+            bloom_filter_fpp=bloom_filter_fpp
+        )
+        self.is_open = True
+
+    def __del__(self):
+        if getattr(self, 'is_open', False):
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.close()
+        # return false since we want to propagate exceptions

Review comment:
       Isn't propagation the default?

##########
File path: cpp/src/arrow/testing/random.h
##########
@@ -322,6 +322,16 @@ class ARROW_TESTING_EXPORT RandomArrayGenerator {
   std::shared_ptr<Array> FixedSizeBinary(int64_t size, int32_t byte_width,
                                          double null_probability = 0);
 
+  // /// \brief Generate a random StructArray

Review comment:
       If this is not implemented, please leave it out :-)

##########
File path: python/pyarrow/orc.py
##########
@@ -117,21 +185,92 @@ def read(self, columns=None):
         return self.reader.read(columns=columns)
 
 
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+    Determine which ORC file version to use. Hive 0.11 / ORC v0 is the older
+    version as defined `here <https://orc.apache.org/specification/ORCv0/>`_
+    while Hive 0.12 / ORC v1 is the newer one as defined
+    `here <https://orc.apache.org/specification/ORCv1/>`_.
+batch_size : int, default 1024
+    Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+    Size of each ORC stripe.
+compression : string, default 'zlib'
+    Specify the compression codec.
+    Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZO', 'LZ4', 'ZSTD'}
+compression_block_size : int, default 64 * 1024
+    Specify the size of each compression block.

Review comment:
       Same below: no need to add "Specify" or "Set" in front of each parameter 
description.

##########
File path: python/pyarrow/_orc.pyx
##########
@@ -36,7 +36,233 @@ from pyarrow.lib cimport (check_status, _Weakrefable,
                           pyarrow_unwrap_table,
                           get_reader,
                           get_writer)
-from pyarrow.lib import tobytes
+from pyarrow.lib import frombytes, tobytes
+
+
+cdef compression_type_from_enum(CCompressionType compression_type_):
+    return {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }.get(compression_type_, 'UNKNOWN')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError('Unknown CompressionKind: {0}'.format(name))
+
+
+cdef compression_strategy_from_enum(CompressionStrategy compression_strategy_):
+    return {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }.get(compression_strategy_, 'UNKNOWN')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    # SPEED is the default value in the ORC C++ implementation
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError('Unknown CompressionStrategy: {0}'.format(name))
+
+
+cdef rle_version_from_enum(RleVersion rle_version_):
+    return {
+        _RleVersion_1: '1',
+        _RleVersion_2: '2',
+    }.get(rle_version_, 'UNKNOWN')
+
+
+cdef bloom_filter_version_from_enum(BloomFilterVersion bloom_filter_version_):
+    return {
+        _BloomFilterVersion_ORIGINAL: 'ORIGINAL',
+        _BloomFilterVersion_UTF8: 'UTF8',
+        _BloomFilterVersion_FUTURE: 'FUTURE',
+    }.get(bloom_filter_version_, 'UNKNOWN')
+
+
+cdef file_version_from_class(FileVersion file_version_):
+    cdef object file_version = file_version_.ToString()
+    return frombytes(file_version)
+
+
+cdef writer_id_from_enum(WriterId writer_id_):
+    return {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }.get(writer_id_, 'UNKNOWN')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version_):
+    return {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }.get(writer_version_, 'UNKNOWN')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+    file_version=None,
+    batch_size=None,
+    stripe_size=None,
+    compression=None,
+    compression_block_size=None,
+    compression_strategy=None,
+    row_index_stride=None,
+    padding_tolerance=None,
+    dictionary_key_size_threshold=None,
+    bloom_filter_columns=None,
+    bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+
+    options = make_shared[WriteOptions]()
+
+    # batch_size
+
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError("Invalid ORC writer batch size: {0}"
+                             .format(batch_size))

Review comment:
       Same in other instances below :-)




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to