This is an automated email from the ASF dual-hosted git repository.

agrove pushed a commit to branch branch-0.13
in repository https://gitbox.apache.org/repos/asf/datafusion-comet.git


The following commit(s) were added to refs/heads/branch-0.13 by this push:
     new fefdc26ce chore: [branch-0.13] Prepare 0.13.0 release (#3285)
fefdc26ce is described below

commit fefdc26ced0c854f92d80d939f3427c790dcaddc
Author: Andy Grove <[email protected]>
AuthorDate: Mon Jan 26 09:28:06 2026 -0700

    chore: [branch-0.13] Prepare 0.13.0 release (#3285)
---
 common/pom.xml                                 |   2 +-
 dev/diffs/3.4.3.diff                           |   2 +-
 dev/diffs/3.5.7.diff                           |   2 +-
 dev/diffs/4.0.1.diff                           |   2 +-
 dev/diffs/iceberg-rust/1.10.0.diff             |   2 +-
 dev/diffs/iceberg-rust/1.8.1.diff              |   2 +-
 dev/diffs/iceberg-rust/1.9.1.diff              |   2 +-
 dev/diffs/iceberg/1.10.0.diff                  |   2 +-
 dev/diffs/iceberg/1.8.1.diff                   |   2 +-
 dev/diffs/iceberg/1.9.1.diff                   |   2 +-
 dev/generate-release-docs.sh                   |   8 +-
 docs/source/user-guide/latest/compatibility.md |  84 +++++++
 docs/source/user-guide/latest/configs.md       | 297 +++++++++++++++++++++++++
 fuzz-testing/pom.xml                           |   2 +-
 native/Cargo.lock                              |  68 +++---
 pom.xml                                        |   2 +-
 spark-integration/pom.xml                      |   2 +-
 spark/pom.xml                                  |  30 ++-
 18 files changed, 458 insertions(+), 55 deletions(-)

diff --git a/common/pom.xml b/common/pom.xml
index c7a6147c3..37c086aa5 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -26,7 +26,7 @@ under the License.
   <parent>
     <groupId>org.apache.datafusion</groupId>
     <artifactId>comet-parent-spark${spark.version.short}_${scala.binary.version}</artifactId>
-    <version>0.13.0-SNAPSHOT</version>
+    <version>0.13.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
diff --git a/dev/diffs/3.4.3.diff b/dev/diffs/3.4.3.diff
index fc8fd923f..7e4f72ac3 100644
--- a/dev/diffs/3.4.3.diff
+++ b/dev/diffs/3.4.3.diff
@@ -7,7 +7,7 @@ index d3544881af1..07d1ed97925 100644
      <ivy.version>2.5.1</ivy.version>
      <oro.version>2.0.8</oro.version>
 +    <spark.version.short>3.4</spark.version.short>
-+    <comet.version>0.13.0-SNAPSHOT</comet.version>
++    <comet.version>0.13.0</comet.version>
      <!--
      If you changes codahale.metrics.version, you also need to change
      the link to metrics.dropwizard.io in docs/monitoring.md.
diff --git a/dev/diffs/3.5.7.diff b/dev/diffs/3.5.7.diff
index 3adc8c833..7bf132d7f 100644
--- a/dev/diffs/3.5.7.diff
+++ b/dev/diffs/3.5.7.diff
@@ -7,7 +7,7 @@ index a0e25ce4d8d..29d3b93f994 100644
      <ivy.version>2.5.1</ivy.version>
      <oro.version>2.0.8</oro.version>
 +    <spark.version.short>3.5</spark.version.short>
-+    <comet.version>0.13.0-SNAPSHOT</comet.version>
++    <comet.version>0.13.0</comet.version>
      <!--
      If you changes codahale.metrics.version, you also need to change
      the link to metrics.dropwizard.io in docs/monitoring.md.
diff --git a/dev/diffs/4.0.1.diff b/dev/diffs/4.0.1.diff
index a9315db00..84ed43e29 100644
--- a/dev/diffs/4.0.1.diff
+++ b/dev/diffs/4.0.1.diff
@@ -7,7 +7,7 @@ index 22922143fc3..477d4ec4194 100644
      <ivy.version>2.5.3</ivy.version>
      <oro.version>2.0.8</oro.version>
 +    <spark.version.short>4.0</spark.version.short>
-+    <comet.version>0.13.0-SNAPSHOT</comet.version>
++    <comet.version>0.13.0</comet.version>
      <!--
      If you change codahale.metrics.version, you also need to change
      the link to metrics.dropwizard.io in docs/monitoring.md.
diff --git a/dev/diffs/iceberg-rust/1.10.0.diff b/dev/diffs/iceberg-rust/1.10.0.diff
index da56aabfe..6e6cf150a 100644
--- a/dev/diffs/iceberg-rust/1.10.0.diff
+++ b/dev/diffs/iceberg-rust/1.10.0.diff
@@ -25,7 +25,7 @@ index eeabe54f5..867018058 100644
  caffeine = "2.9.3"
  calcite = "1.40.0"
 -comet = "0.8.1"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.2"
  delta-spark = "3.3.2"
diff --git a/dev/diffs/iceberg-rust/1.8.1.diff b/dev/diffs/iceberg-rust/1.8.1.diff
index aa9774d96..838ac8983 100644
--- a/dev/diffs/iceberg-rust/1.8.1.diff
+++ b/dev/diffs/iceberg-rust/1.8.1.diff
@@ -24,7 +24,7 @@ index 04ffa8f4e..3a57af315 100644
  awssdk-s3accessgrants = "2.3.0"
  caffeine = "2.9.3"
  calcite = "1.10.0"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.0"
  delta-spark = "3.3.0"
diff --git a/dev/diffs/iceberg-rust/1.9.1.diff b/dev/diffs/iceberg-rust/1.9.1.diff
index d9bcdb2e3..2cc3a25a5 100644
--- a/dev/diffs/iceberg-rust/1.9.1.diff
+++ b/dev/diffs/iceberg-rust/1.9.1.diff
@@ -24,7 +24,7 @@ index c50991c5f..3acb395a6 100644
  bson-ver = "4.11.5"
  caffeine = "2.9.3"
  calcite = "1.39.0"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.1"
  delta-spark = "3.3.1"
diff --git a/dev/diffs/iceberg/1.10.0.diff b/dev/diffs/iceberg/1.10.0.diff
index dbce4622e..95583599b 100644
--- a/dev/diffs/iceberg/1.10.0.diff
+++ b/dev/diffs/iceberg/1.10.0.diff
@@ -25,7 +25,7 @@ index eeabe54f5f0..867018058ee 100644
  caffeine = "2.9.3"
  calcite = "1.40.0"
 -comet = "0.8.1"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.2"
  delta-spark = "3.3.2"
diff --git a/dev/diffs/iceberg/1.8.1.diff b/dev/diffs/iceberg/1.8.1.diff
index 999ce63b8..3c3a548a9 100644
--- a/dev/diffs/iceberg/1.8.1.diff
+++ b/dev/diffs/iceberg/1.8.1.diff
@@ -24,7 +24,7 @@ index 04ffa8f4edc..3a57af3156a 100644
  awssdk-s3accessgrants = "2.3.0"
  caffeine = "2.9.3"
  calcite = "1.10.0"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.0"
  delta-spark = "3.3.0"
diff --git a/dev/diffs/iceberg/1.9.1.diff b/dev/diffs/iceberg/1.9.1.diff
index 2eee87d7d..6f04a5883 100644
--- a/dev/diffs/iceberg/1.9.1.diff
+++ b/dev/diffs/iceberg/1.9.1.diff
@@ -24,7 +24,7 @@ index c50991c5fc6..f7ad00f0b78 100644
  bson-ver = "4.11.5"
  caffeine = "2.9.3"
  calcite = "1.39.0"
-+comet = "0.13.0-SNAPSHOT"
++comet = "0.13.0"
  datasketches = "6.2.0"
  delta-standalone = "3.3.1"
  delta-spark = "3.3.1"
diff --git a/dev/generate-release-docs.sh b/dev/generate-release-docs.sh
index 5da77a9d6..747d1649b 100755
--- a/dev/generate-release-docs.sh
+++ b/dev/generate-release-docs.sh
@@ -42,13 +42,7 @@ PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
 cd "${PROJECT_ROOT}"
 
 echo "Compiling common and spark modules..."
-./mvnw -q compile -pl common,spark -DskipTests
-
-echo "Generating documentation content..."
-./mvnw -q exec:java -pl spark \
-  -Dexec.mainClass=org.apache.comet.GenerateDocs \
-  -Dexec.arguments="${PROJECT_ROOT}/docs/source/user-guide/latest/" \
-  -Dexec.classpathScope=compile
+./mvnw package -Pgenerate-docs -DskipTests -Dmaven.test.skip=true
 
 echo ""
 echo "Done! Generated documentation content in docs/source/user-guide/latest/"
diff --git a/docs/source/user-guide/latest/compatibility.md b/docs/source/user-guide/latest/compatibility.md
index c09f6a61e..6f5e42bdb 100644
--- a/docs/source/user-guide/latest/compatibility.md
+++ b/docs/source/user-guide/latest/compatibility.md
@@ -111,16 +111,100 @@ Cast operations in Comet fall into three levels of support:
 ### Legacy Mode
 
 <!--BEGIN:CAST_LEGACY_TABLE-->
+<!-- prettier-ignore-start -->
+| | binary | boolean | byte | date | decimal | double | float | integer | long | short | string | timestamp |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| binary | - | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | C | N/A |
+| boolean | N/A | - | C | N/A | U | C | C | C | C | C | C | U |
+| byte | U | C | - | N/A | C | C | C | C | C | C | C | U |
+| date | N/A | U | U | - | U | U | U | U | U | U | C | U |
+| decimal | N/A | C | C | N/A | - | C | C | C | C | C | C | U |
+| double | N/A | C | C | N/A | I | - | C | C | C | C | C | U |
+| float | N/A | C | C | N/A | I | C | - | C | C | C | C | U |
+| integer | U | C | C | N/A | C | C | C | - | C | C | C | U |
+| long | U | C | C | N/A | C | C | C | C | - | C | C | U |
+| short | U | C | C | N/A | C | C | C | C | C | - | C | U |
+| string | C | C | C | C | I | C | C | C | C | C | - | I |
+| timestamp | N/A | U | U | C | U | U | U | U | C | U | C | - |
+<!-- prettier-ignore-end -->
+
+**Notes:**
+
+- **decimal -> string**: There can be formatting differences in some cases due to Spark using scientific notation where Comet does not
+- **double -> decimal**: There can be rounding differences
+- **double -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **float -> decimal**: There can be rounding differences
+- **float -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **string -> date**: Only supports years between 262143 BC and 262142 AD
+- **string -> decimal**: Does not support fullwidth unicode digits (e.g. \\uFF10)
+  or strings containing null bytes (e.g. \\u0000)
+- **string -> timestamp**: Not all valid formats are supported
 <!--END:CAST_LEGACY_TABLE-->
 
 ### Try Mode
 
 <!--BEGIN:CAST_TRY_TABLE-->
+<!-- prettier-ignore-start -->
+| | binary | boolean | byte | date | decimal | double | float | integer | long | short | string | timestamp |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| binary | - | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | C | N/A |
+| boolean | N/A | - | C | N/A | U | C | C | C | C | C | C | U |
+| byte | U | C | - | N/A | C | C | C | C | C | C | C | U |
+| date | N/A | U | U | - | U | U | U | U | U | U | C | U |
+| decimal | N/A | C | C | N/A | - | C | C | C | C | C | C | U |
+| double | N/A | C | C | N/A | I | - | C | C | C | C | C | U |
+| float | N/A | C | C | N/A | I | C | - | C | C | C | C | U |
+| integer | U | C | C | N/A | C | C | C | - | C | C | C | U |
+| long | U | C | C | N/A | C | C | C | C | - | C | C | U |
+| short | U | C | C | N/A | C | C | C | C | C | - | C | U |
+| string | C | C | C | C | I | C | C | C | C | C | - | I |
+| timestamp | N/A | U | U | C | U | U | U | U | C | U | C | - |
+<!-- prettier-ignore-end -->
+
+**Notes:**
+
+- **decimal -> string**: There can be formatting differences in some cases due to Spark using scientific notation where Comet does not
+- **double -> decimal**: There can be rounding differences
+- **double -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **float -> decimal**: There can be rounding differences
+- **float -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **string -> date**: Only supports years between 262143 BC and 262142 AD
+- **string -> decimal**: Does not support fullwidth unicode digits (e.g. \\uFF10)
+  or strings containing null bytes (e.g. \\u0000)
+- **string -> timestamp**: Not all valid formats are supported
 <!--END:CAST_TRY_TABLE-->
 
 ### ANSI Mode
 
 <!--BEGIN:CAST_ANSI_TABLE-->
+<!-- prettier-ignore-start -->
+| | binary | boolean | byte | date | decimal | double | float | integer | long | short | string | timestamp |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| binary | - | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | C | N/A |
+| boolean | N/A | - | C | N/A | U | C | C | C | C | C | C | U |
+| byte | U | C | - | N/A | C | C | C | C | C | C | C | U |
+| date | N/A | U | U | - | U | U | U | U | U | U | C | U |
+| decimal | N/A | C | C | N/A | - | C | C | C | C | C | C | U |
+| double | N/A | C | C | N/A | I | - | C | C | C | C | C | U |
+| float | N/A | C | C | N/A | I | C | - | C | C | C | C | U |
+| integer | U | C | C | N/A | C | C | C | - | C | C | C | U |
+| long | U | C | C | N/A | C | C | C | C | - | C | C | U |
+| short | U | C | C | N/A | C | C | C | C | C | - | C | U |
+| string | C | C | C | C | I | C | C | C | C | C | - | I |
+| timestamp | N/A | U | U | C | U | U | U | U | C | U | C | - |
+<!-- prettier-ignore-end -->
+
+**Notes:**
+
+- **decimal -> string**: There can be formatting differences in some cases due to Spark using scientific notation where Comet does not
+- **double -> decimal**: There can be rounding differences
+- **double -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **float -> decimal**: There can be rounding differences
+- **float -> string**: There can be differences in precision. For example, the input "1.4E-45" will produce 1.0E-45 instead of 1.4E-45
+- **string -> date**: Only supports years between 262143 BC and 262142 AD
+- **string -> decimal**: Does not support fullwidth unicode digits (e.g. \\uFF10)
+  or strings containing null bytes (e.g. \\u0000)
+- **string -> timestamp**: ANSI mode not supported
 <!--END:CAST_ANSI_TABLE-->
 
 See the [tracking issue](https://github.com/apache/datafusion-comet/issues/286) for more details.
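
The cast notes above describe value-level differences rather than hard failures. A minimal sketch of how the float -> string note surfaces, assuming a session with the Comet plugin on the classpath (the session setup and sample value are illustrative only):

```scala
import org.apache.spark.sql.SparkSession

// Illustrative sketch of the float -> string precision note above.
// Assumes the Comet jar is on the classpath.
val spark = SparkSession.builder()
  .master("local[*]")
  .config("spark.plugins", "org.apache.spark.CometPlugin")
  .getOrCreate()
import spark.implicits._

// java.lang.Float.MIN_VALUE is 1.4E-45; per the notes, the native cast may
// render it as "1.0E-45" where Spark itself produces "1.4E-45".
Seq(java.lang.Float.MIN_VALUE).toDF("f")
  .selectExpr("CAST(f AS STRING)")
  .show()
```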
diff --git a/docs/source/user-guide/latest/configs.md b/docs/source/user-guide/latest/configs.md
index a268691a3..9828b8b7f 100644
--- a/docs/source/user-guide/latest/configs.md
+++ b/docs/source/user-guide/latest/configs.md
@@ -24,16 +24,54 @@ Comet provides the following configuration settings.
 ## Scan Configuration Settings
 
 <!--BEGIN:CONFIG_TABLE[scan]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.scan.enabled` | Whether to enable native scans. When this is turned on, Spark will use Comet to read supported data sources (currently only Parquet is supported natively). Note that to enable native vectorized execution, both this config and `spark.comet.exec.enabled` need to be enabled. | true |
+| `spark.comet.scan.icebergNative.enabled` | Whether to enable native Iceberg table scan using iceberg-rust. When enabled, Iceberg tables are read directly through native execution, bypassing Spark's DataSource V2 API for better performance. | false |
+| `spark.comet.scan.preFetch.enabled` | Whether to enable pre-fetching feature of CometScan. | false |
+| `spark.comet.scan.preFetch.threadNum` | The number of threads running pre-fetching for CometScan. Effective if spark.comet.scan.preFetch.enabled is enabled. Note that more pre-fetching threads means more memory requirement to store pre-fetched row groups. | 2 |
+| `spark.comet.scan.unsignedSmallIntSafetyCheck` | Parquet files may contain unsigned 8-bit integers (UINT_8) which Spark maps to ShortType. When this config is true (default), Comet falls back to Spark for ShortType columns because we cannot distinguish signed INT16 (safe) from unsigned UINT_8 (may produce different results). Set to false to allow native execution of ShortType columns if you know your data does not contain unsigned UINT_8 columns from improperly encoded Parquet files. F [...]
+| `spark.hadoop.fs.comet.libhdfs.schemes` | Defines filesystem schemes (e.g., hdfs, webhdfs) that the native side accesses via libhdfs, separated by commas. Valid only when built with hdfs feature enabled. | |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Parquet Reader Configuration Settings
 
 <!--BEGIN:CONFIG_TABLE[parquet]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.parquet.enable.directBuffer` | Whether to use Java direct byte buffer when reading Parquet. | false |
+| `spark.comet.parquet.read.io.adjust.readRange.skew` | In the parallel reader, if the read ranges submitted are skewed in sizes, this option will cause the reader to break up larger read ranges into smaller ranges to reduce the skew. This will result in a slightly larger number of connections opened to the file system but may give improved performance. | false |
+| `spark.comet.parquet.read.io.mergeRanges` | When enabled the parallel reader will try to merge ranges of data that are separated by less than `comet.parquet.read.io.mergeRanges.delta` bytes. Longer continuous reads are faster on cloud storage. | true |
+| `spark.comet.parquet.read.io.mergeRanges.delta` | The delta in bytes between consecutive read ranges below which the parallel reader will try to merge the ranges. The default is 8MB. | 8388608 |
+| `spark.comet.parquet.read.parallel.io.enabled` | Whether to enable Comet's parallel reader for Parquet files. The parallel reader reads ranges of consecutive data in a file in parallel. It is faster for large files and row groups but uses more resources. | true |
+| `spark.comet.parquet.read.parallel.io.thread-pool.size` | The maximum number of parallel threads the parallel reader will use in a single executor. For executors configured with a smaller number of cores, use a smaller number. | 16 |
+| `spark.comet.parquet.respectFilterPushdown` | Whether to respect Spark's PARQUET_FILTER_PUSHDOWN_ENABLED config. This needs to be respected when running the Spark SQL test suite, but it is disabled by default because respecting it results in poor performance in Comet when using the new native scans. | false |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Query Execution Settings
 
 <!--BEGIN:CONFIG_TABLE[exec]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.caseConversion.enabled` | Java uses locale-specific rules when converting strings to upper or lower case and Rust does not, so we disable upper and lower by default. | false |
+| `spark.comet.debug.enabled` | Whether to enable debug mode for Comet. When enabled, Comet will do additional checks for debugging purposes. For example, validating arrays when importing them from the JVM on the native side. Note that these checks may be expensive in performance and should only be enabled for debugging purposes. | false |
+| `spark.comet.dppFallback.enabled` | Whether to fall back to Spark for queries that use DPP. | true |
+| `spark.comet.enabled` | Whether to enable Comet extension for Spark. When this is turned on, Spark will use Comet to read Parquet data source. Note that to enable native vectorized execution, both this config and `spark.comet.exec.enabled` need to be enabled. It can be overridden by the environment variable `ENABLE_COMET`. | true |
+| `spark.comet.exceptionOnDatetimeRebase` | Whether to throw exception when seeing dates/timestamps from the legacy hybrid (Julian + Gregorian) calendar. Since Spark 3, dates/timestamps were written according to the Proleptic Gregorian calendar. When this is true, Comet will throw exceptions when seeing these dates/timestamps that were written by Spark version before 3.0. If this is false, these dates/timestamps will be read as if they were written to the Proleptic Gregorian calendar and [...]
+| `spark.comet.exec.columnarToRow.native.enabled` | Whether to enable native columnar to row conversion. When enabled, Comet will use native Rust code to convert Arrow columnar data to Spark UnsafeRow format instead of the JVM implementation. This can improve performance for queries that need to convert between columnar and row formats. This is an experimental feature. | false |
+| `spark.comet.exec.enabled` | Whether to enable Comet native vectorized execution for Spark. This controls whether Spark should convert operators into their Comet counterparts and execute them in native space. Note: each operator is associated with a separate config in the format of `spark.comet.exec.<operator_name>.enabled` at the moment, and both the config and this need to be turned on, in order for the operator to be executed in native. | true |
+| `spark.comet.exec.replaceSortMergeJoin` | Experimental feature to force Spark to replace SortMergeJoin with ShuffledHashJoin for improved performance. This feature is not stable yet. For more information, refer to the [Comet Tuning Guide](https://datafusion.apache.org/comet/user-guide/tuning.html). | false |
+| `spark.comet.exec.strictFloatingPoint` | When enabled, fall back to Spark for floating-point operations that may differ from Spark, such as when comparing or sorting -0.0 and 0.0. For more information, refer to the [Comet Compatibility Guide](https://datafusion.apache.org/comet/user-guide/compatibility.html). | false |
+| `spark.comet.maxTempDirectorySize` | The maximum amount of data (in bytes) stored inside the temporary directories. | 107374182400b |
+| `spark.comet.metrics.updateInterval` | The interval in milliseconds to update metrics. If interval is negative, metrics will be updated upon task completion. | 3000 |
+| `spark.comet.nativeLoadRequired` | Whether to require Comet native library to load successfully when Comet is enabled. If not, Comet will silently fall back to Spark when it fails to load the native lib. Otherwise, an error will be thrown and the Spark job will be aborted. | false |
+| `spark.comet.regexp.allowIncompatible` | Comet is not currently fully compatible with Spark for all regular expressions. Set this config to true to allow them anyway. For more information, refer to the [Comet Compatibility Guide](https://datafusion.apache.org/comet/user-guide/compatibility.html). | false |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Viewing Explain Plan & Fallback Reasons
@@ -41,34 +79,293 @@ Comet provides the following configuration settings.
 These settings can be used to determine which parts of the plan are accelerated by Comet and to see why some parts of the plan could not be supported by Comet.
 
 <!--BEGIN:CONFIG_TABLE[exec_explain]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.explain.format` | Choose extended explain output. The default format of 'verbose' will provide the full query plan annotated with fallback reasons as well as a summary of how much of the plan was accelerated by Comet. The format 'fallback' provides a list of fallback reasons instead. | verbose |
+| `spark.comet.explain.native.enabled` | When this setting is enabled, Comet will provide a tree representation of the native query plan before execution and again after execution, with metrics. | false |
+| `spark.comet.explain.rules` | When this setting is enabled, Comet will log all plan transformations performed in physical optimizer rules. | false |
+| `spark.comet.explainFallback.enabled` | When this setting is enabled, Comet will provide logging explaining the reason(s) why a query stage cannot be executed natively. Set this to false to reduce the amount of logging. | false |
+| `spark.comet.logFallbackReasons.enabled` | When this setting is enabled, Comet will log warnings for all fallback reasons. It can be overridden by the environment variable `ENABLE_COMET_LOG_FALLBACK_REASONS`. | false |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Shuffle Configuration Settings
 
 <!--BEGIN:CONFIG_TABLE[shuffle]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.columnar.shuffle.async.enabled` | Whether to enable asynchronous shuffle for Arrow-based shuffle. | false |
+| `spark.comet.columnar.shuffle.async.max.thread.num` | Maximum number of threads on an executor used for Comet async columnar shuffle. This is the upper bound of total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task (`spark.comet.columnar.shuffle.async.thread.num`) is larger than this config, Comet will use this config as the number of shuffle threads per executor instead. | 100 |
+| `spark.comet.columnar.shuffle.async.thread.num` | Number of threads used for Comet async columnar shuffle per shuffle task. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
+| `spark.comet.columnar.shuffle.batch.size` | Batch size when writing out sorted spill files on the native side. Note that this should not be larger than batch size (i.e., `spark.comet.batchSize`). Otherwise it will produce larger batches than expected in the native operator after shuffle. | 8192 |
+| `spark.comet.exec.shuffle.compression.codec` | The codec of Comet native shuffle used to compress shuffle data. lz4, zstd, and snappy are supported. Compression can be disabled by setting spark.shuffle.compress=false. | lz4 |
+| `spark.comet.exec.shuffle.compression.zstd.level` | The compression level to use when compressing shuffle files with zstd. | 1 |
+| `spark.comet.exec.shuffle.enabled` | Whether to enable Comet native shuffle. Note that this requires setting `spark.shuffle.manager` to `org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager`. `spark.shuffle.manager` must be set before starting the Spark application and cannot be changed during the application. | true |
+| `spark.comet.exec.shuffle.writeBufferSize` | Size of the write buffer in bytes used by the native shuffle writer when writing shuffle data to disk. Larger values may improve write performance by reducing the number of system calls, but will use more memory. The default is 1MB which provides a good balance between performance and memory usage. | 1048576b |
+| `spark.comet.native.shuffle.partitioning.hash.enabled` | Whether to enable hash partitioning for Comet native shuffle. | true |
+| `spark.comet.native.shuffle.partitioning.range.enabled` | Whether to enable range partitioning for Comet native shuffle. | true |
+| `spark.comet.native.shuffle.partitioning.roundrobin.enabled` | Whether to enable round robin partitioning for Comet native shuffle. This is disabled by default because Comet's round-robin produces different partition assignments than Spark. Spark sorts rows by their binary UnsafeRow representation before assigning partitions, but Comet uses Arrow format which has a different binary layout. Instead, Comet implements round-robin as hash partitioning on all columns, which achieves the sam [...]
+| `spark.comet.native.shuffle.partitioning.roundrobin.maxHashColumns` | The maximum number of columns to hash for round robin partitioning. When set to 0 (the default), all columns are hashed. When set to a positive value, only the first N columns are used for hashing, which can improve performance for wide tables while still providing reasonable distribution. | 0 |
+| `spark.comet.shuffle.preferDictionary.ratio` | The ratio of total values to distinct values in a string column to decide whether to prefer dictionary encoding when shuffling the column. If the ratio is higher than this config, dictionary encoding will be used when shuffling the string column. This config is effective if it is higher than 1.0. Note that this config is only used when `spark.comet.exec.shuffle.mode` is `jvm`. | 10.0 |
+| `spark.comet.shuffle.sizeInBytesMultiplier` | Comet reports smaller sizes for shuffle due to using Arrow's columnar memory format and this can result in Spark choosing a different join strategy due to the estimated size of the exchange being smaller. Comet will multiply sizeInBytes by this amount to avoid regressions in join strategy. | 1.0 |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Memory & Tuning Configuration Settings
 
 <!--BEGIN:CONFIG_TABLE[tuning]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.batchSize` | The columnar batch size, i.e., the maximum number of rows that a batch can contain. | 8192 |
+| `spark.comet.exec.memoryPool` | The type of memory pool to be used for Comet native execution when running Spark in off-heap mode. Available pool types are `greedy_unified` and `fair_unified`. For more information, refer to the [Comet Tuning Guide](https://datafusion.apache.org/comet/user-guide/tuning.html). | fair_unified |
+| `spark.comet.exec.memoryPool.fraction` | Fraction of off-heap memory pool that is available to Comet. Only applies to off-heap mode. For more information, refer to the [Comet Tuning Guide](https://datafusion.apache.org/comet/user-guide/tuning.html). | 1.0 |
+| `spark.comet.tracing.enabled` | Enable fine-grained tracing of events and memory usage. For more information, refer to the [Comet Tracing Guide](https://datafusion.apache.org/comet/contributor-guide/tracing.html). | false |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Development & Testing Settings
 
 <!--BEGIN:CONFIG_TABLE[testing]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.columnar.shuffle.memory.factor` | Fraction of Comet memory to be allocated per executor process for columnar shuffle when running in on-heap mode. For more information, refer to the [Comet Tuning Guide](https://datafusion.apache.org/comet/user-guide/tuning.html). | 1.0 |
+| `spark.comet.convert.csv.enabled` | When enabled, data from Spark (non-native) CSV v1 and v2 scans will be converted to Arrow format. This is an experimental feature and has known issues with non-UTC timezones. | false |
+| `spark.comet.convert.json.enabled` | When enabled, data from Spark (non-native) JSON v1 and v2 scans will be converted to Arrow format. This is an experimental feature and has known issues with non-UTC timezones. | false |
+| `spark.comet.convert.parquet.enabled` | When enabled, data from Spark (non-native) Parquet v1 and v2 scans will be converted to Arrow format. This is an experimental feature and has known issues with non-UTC timezones. | false |
+| `spark.comet.exec.onHeap.enabled` | Whether to allow Comet to run in on-heap mode. Required for running Spark SQL tests. It can be overridden by the environment variable `ENABLE_COMET_ONHEAP`. | false |
+| `spark.comet.exec.onHeap.memoryPool` | The type of memory pool to be used for Comet native execution when running Spark in on-heap mode. Available pool types are `greedy`, `fair_spill`, `greedy_task_shared`, `fair_spill_task_shared`, `greedy_global`, `fair_spill_global`, and `unbounded`. | greedy_task_shared |
+| `spark.comet.memoryOverhead` | The amount of additional memory to be allocated per executor process for Comet, in MiB, when running Spark in on-heap mode. | 1024 MiB |
+| `spark.comet.parquet.write.enabled` | Whether to enable native Parquet write through Comet. When enabled, Comet will intercept Parquet write operations and execute them natively. This feature is highly experimental and only partially implemented. It should not be used in production. | false |
+| `spark.comet.scan.csv.v2.enabled` | Whether to use the native Comet V2 CSV reader for improved performance. When disabled, the standard Spark CSV reader is used. Experimental: performance benefits are workload-dependent. | false |
+| `spark.comet.sparkToColumnar.enabled` | Whether to enable Spark to Arrow columnar conversion. When this is turned on, Comet will convert operators in `spark.comet.sparkToColumnar.supportedOperatorList` into Arrow columnar format before processing. This is an experimental feature and has known issues with non-UTC timezones. | false |
+| `spark.comet.sparkToColumnar.supportedOperatorList` | A comma-separated list of operators that will be converted to Arrow columnar format when `spark.comet.sparkToColumnar.enabled` is true. | Range,InMemoryTableScan,RDDScan |
+| `spark.comet.testing.strict` | Experimental option to enable strict testing, which will fail tests that could be more comprehensive, such as checking for a specific fallback reason. It can be overridden by the environment variable `ENABLE_COMET_STRICT_TESTING`. | false |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Enabling or Disabling Individual Operators
 
 <!--BEGIN:CONFIG_TABLE[enable_exec]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.exec.aggregate.enabled` | Whether to enable aggregate by default. | true |
+| `spark.comet.exec.broadcastExchange.enabled` | Whether to enable broadcastExchange by default. | true |
+| `spark.comet.exec.broadcastHashJoin.enabled` | Whether to enable broadcastHashJoin by default. | true |
+| `spark.comet.exec.coalesce.enabled` | Whether to enable coalesce by default. | true |
+| `spark.comet.exec.collectLimit.enabled` | Whether to enable collectLimit by default. | true |
+| `spark.comet.exec.expand.enabled` | Whether to enable expand by default. | true |
+| `spark.comet.exec.explode.enabled` | Whether to enable explode by default. | true |
+| `spark.comet.exec.filter.enabled` | Whether to enable filter by default. | true |
+| `spark.comet.exec.globalLimit.enabled` | Whether to enable globalLimit by default. | true |
+| `spark.comet.exec.hashJoin.enabled` | Whether to enable hashJoin by default. | true |
+| `spark.comet.exec.localLimit.enabled` | Whether to enable localLimit by default. | true |
+| `spark.comet.exec.localTableScan.enabled` | Whether to enable localTableScan by default. | false |
+| `spark.comet.exec.project.enabled` | Whether to enable project by default. | true |
+| `spark.comet.exec.sort.enabled` | Whether to enable sort by default. | true |
+| `spark.comet.exec.sortMergeJoin.enabled` | Whether to enable sortMergeJoin by default. | true |
+| `spark.comet.exec.sortMergeJoinWithJoinFilter.enabled` | Experimental support for Sort Merge Join with filter | false |
+| `spark.comet.exec.takeOrderedAndProject.enabled` | Whether to enable takeOrderedAndProject by default. | true |
+| `spark.comet.exec.union.enabled` | Whether to enable union by default. | true |
+| `spark.comet.exec.window.enabled` | Whether to enable window by default. | true |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Enabling or Disabling Individual Scalar Expressions
 
 <!--BEGIN:CONFIG_TABLE[enable_expr]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.expression.Abs.enabled` | Enable Comet acceleration for `Abs` | true |
+| `spark.comet.expression.Acos.enabled` | Enable Comet acceleration for `Acos` | true |
+| `spark.comet.expression.Add.enabled` | Enable Comet acceleration for `Add` | true |
+| `spark.comet.expression.Alias.enabled` | Enable Comet acceleration for `Alias` | true |
+| `spark.comet.expression.And.enabled` | Enable Comet acceleration for `And` | true |
+| `spark.comet.expression.ArrayAppend.enabled` | Enable Comet acceleration for `ArrayAppend` | true |
+| `spark.comet.expression.ArrayCompact.enabled` | Enable Comet acceleration for `ArrayCompact` | true |
+| `spark.comet.expression.ArrayContains.enabled` | Enable Comet acceleration for `ArrayContains` | true |
+| `spark.comet.expression.ArrayDistinct.enabled` | Enable Comet acceleration for `ArrayDistinct` | true |
+| `spark.comet.expression.ArrayExcept.enabled` | Enable Comet acceleration for `ArrayExcept` | true |
+| `spark.comet.expression.ArrayFilter.enabled` | Enable Comet acceleration for `ArrayFilter` | true |
+| `spark.comet.expression.ArrayInsert.enabled` | Enable Comet acceleration for `ArrayInsert` | true |
+| `spark.comet.expression.ArrayIntersect.enabled` | Enable Comet acceleration for `ArrayIntersect` | true |
+| `spark.comet.expression.ArrayJoin.enabled` | Enable Comet acceleration for `ArrayJoin` | true |
+| `spark.comet.expression.ArrayMax.enabled` | Enable Comet acceleration for `ArrayMax` | true |
+| `spark.comet.expression.ArrayMin.enabled` | Enable Comet acceleration for `ArrayMin` | true |
+| `spark.comet.expression.ArrayRemove.enabled` | Enable Comet acceleration for `ArrayRemove` | true |
+| `spark.comet.expression.ArrayRepeat.enabled` | Enable Comet acceleration for `ArrayRepeat` | true |
+| `spark.comet.expression.ArrayUnion.enabled` | Enable Comet acceleration for `ArrayUnion` | true |
+| `spark.comet.expression.ArraysOverlap.enabled` | Enable Comet acceleration for `ArraysOverlap` | true |
+| `spark.comet.expression.Ascii.enabled` | Enable Comet acceleration for `Ascii` | true |
+| `spark.comet.expression.Asin.enabled` | Enable Comet acceleration for `Asin` | true |
+| `spark.comet.expression.Atan.enabled` | Enable Comet acceleration for `Atan` | true |
+| `spark.comet.expression.Atan2.enabled` | Enable Comet acceleration for `Atan2` | true |
+| `spark.comet.expression.AttributeReference.enabled` | Enable Comet acceleration for `AttributeReference` | true |
+| `spark.comet.expression.BitLength.enabled` | Enable Comet acceleration for `BitLength` | true |
+| `spark.comet.expression.BitwiseAnd.enabled` | Enable Comet acceleration for `BitwiseAnd` | true |
+| `spark.comet.expression.BitwiseCount.enabled` | Enable Comet acceleration for `BitwiseCount` | true |
+| `spark.comet.expression.BitwiseGet.enabled` | Enable Comet acceleration for `BitwiseGet` | true |
+| `spark.comet.expression.BitwiseNot.enabled` | Enable Comet acceleration for `BitwiseNot` | true |
+| `spark.comet.expression.BitwiseOr.enabled` | Enable Comet acceleration for `BitwiseOr` | true |
+| `spark.comet.expression.BitwiseXor.enabled` | Enable Comet acceleration for `BitwiseXor` | true |
+| `spark.comet.expression.BloomFilterMightContain.enabled` | Enable Comet acceleration for `BloomFilterMightContain` | true |
+| `spark.comet.expression.CaseWhen.enabled` | Enable Comet acceleration for `CaseWhen` | true |
+| `spark.comet.expression.Cast.enabled` | Enable Comet acceleration for `Cast` | true |
+| `spark.comet.expression.Ceil.enabled` | Enable Comet acceleration for `Ceil` | true |
+| `spark.comet.expression.CheckOverflow.enabled` | Enable Comet acceleration for `CheckOverflow` | true |
+| `spark.comet.expression.Chr.enabled` | Enable Comet acceleration for `Chr` | true |
+| `spark.comet.expression.Coalesce.enabled` | Enable Comet acceleration for `Coalesce` | true |
+| `spark.comet.expression.Concat.enabled` | Enable Comet acceleration for `Concat` | true |
+| `spark.comet.expression.ConcatWs.enabled` | Enable Comet acceleration for `ConcatWs` | true |
+| `spark.comet.expression.Contains.enabled` | Enable Comet acceleration for `Contains` | true |
+| `spark.comet.expression.Cos.enabled` | Enable Comet acceleration for `Cos` | true |
+| `spark.comet.expression.Cosh.enabled` | Enable Comet acceleration for `Cosh` | true |
+| `spark.comet.expression.Cot.enabled` | Enable Comet acceleration for `Cot` | true |
+| `spark.comet.expression.CreateArray.enabled` | Enable Comet acceleration for `CreateArray` | true |
+| `spark.comet.expression.CreateNamedStruct.enabled` | Enable Comet acceleration for `CreateNamedStruct` | true |
+| `spark.comet.expression.DateAdd.enabled` | Enable Comet acceleration for `DateAdd` | true |
+| `spark.comet.expression.DateDiff.enabled` | Enable Comet acceleration for `DateDiff` | true |
+| `spark.comet.expression.DateFormatClass.enabled` | Enable Comet acceleration for `DateFormatClass` | true |
+| `spark.comet.expression.DateSub.enabled` | Enable Comet acceleration for `DateSub` | true |
+| `spark.comet.expression.DayOfMonth.enabled` | Enable Comet acceleration for `DayOfMonth` | true |
+| `spark.comet.expression.DayOfWeek.enabled` | Enable Comet acceleration for `DayOfWeek` | true |
+| `spark.comet.expression.DayOfYear.enabled` | Enable Comet acceleration for `DayOfYear` | true |
+| `spark.comet.expression.Divide.enabled` | Enable Comet acceleration for `Divide` | true |
+| `spark.comet.expression.ElementAt.enabled` | Enable Comet acceleration for `ElementAt` | true |
+| `spark.comet.expression.EndsWith.enabled` | Enable Comet acceleration for `EndsWith` | true |
+| `spark.comet.expression.EqualNullSafe.enabled` | Enable Comet acceleration for `EqualNullSafe` | true |
+| `spark.comet.expression.EqualTo.enabled` | Enable Comet acceleration for `EqualTo` | true |
+| `spark.comet.expression.Exp.enabled` | Enable Comet acceleration for `Exp` | true |
+| `spark.comet.expression.Expm1.enabled` | Enable Comet acceleration for `Expm1` | true |
+| `spark.comet.expression.Flatten.enabled` | Enable Comet acceleration for `Flatten` | true |
+| `spark.comet.expression.Floor.enabled` | Enable Comet acceleration for `Floor` | true |
+| `spark.comet.expression.FromUnixTime.enabled` | Enable Comet acceleration for `FromUnixTime` | true |
+| `spark.comet.expression.GetArrayItem.enabled` | Enable Comet acceleration for `GetArrayItem` | true |
+| `spark.comet.expression.GetArrayStructFields.enabled` | Enable Comet acceleration for `GetArrayStructFields` | true |
+| `spark.comet.expression.GetMapValue.enabled` | Enable Comet acceleration for `GetMapValue` | true |
+| `spark.comet.expression.GetStructField.enabled` | Enable Comet acceleration for `GetStructField` | true |
+| `spark.comet.expression.GreaterThan.enabled` | Enable Comet acceleration for `GreaterThan` | true |
+| `spark.comet.expression.GreaterThanOrEqual.enabled` | Enable Comet acceleration for `GreaterThanOrEqual` | true |
+| `spark.comet.expression.Hex.enabled` | Enable Comet acceleration for `Hex` | true |
+| `spark.comet.expression.Hour.enabled` | Enable Comet acceleration for `Hour` | true |
+| `spark.comet.expression.If.enabled` | Enable Comet acceleration for `If` | true |
+| `spark.comet.expression.In.enabled` | Enable Comet acceleration for `In` | true |
+| `spark.comet.expression.InSet.enabled` | Enable Comet acceleration for `InSet` | true |
+| `spark.comet.expression.InitCap.enabled` | Enable Comet acceleration for `InitCap` | true |
+| `spark.comet.expression.IntegralDivide.enabled` | Enable Comet acceleration for `IntegralDivide` | true |
+| `spark.comet.expression.IsNaN.enabled` | Enable Comet acceleration for `IsNaN` | true |
+| `spark.comet.expression.IsNotNull.enabled` | Enable Comet acceleration for `IsNotNull` | true |
+| `spark.comet.expression.IsNull.enabled` | Enable Comet acceleration for `IsNull` | true |
+| `spark.comet.expression.JsonToStructs.enabled` | Enable Comet acceleration for `JsonToStructs` | true |
+| `spark.comet.expression.KnownFloatingPointNormalized.enabled` | Enable Comet acceleration for `KnownFloatingPointNormalized` | true |
+| `spark.comet.expression.LastDay.enabled` | Enable Comet acceleration for `LastDay` | true |
+| `spark.comet.expression.Left.enabled` | Enable Comet acceleration for `Left` | true |
+| `spark.comet.expression.Length.enabled` | Enable Comet acceleration for `Length` | true |
+| `spark.comet.expression.LessThan.enabled` | Enable Comet acceleration for `LessThan` | true |
+| `spark.comet.expression.LessThanOrEqual.enabled` | Enable Comet acceleration for `LessThanOrEqual` | true |
+| `spark.comet.expression.Like.enabled` | Enable Comet acceleration for `Like` | true |
+| `spark.comet.expression.Literal.enabled` | Enable Comet acceleration for `Literal` | true |
+| `spark.comet.expression.Log.enabled` | Enable Comet acceleration for `Log` | true |
+| `spark.comet.expression.Log10.enabled` | Enable Comet acceleration for `Log10` | true |
+| `spark.comet.expression.Log2.enabled` | Enable Comet acceleration for `Log2` | true |
+| `spark.comet.expression.Lower.enabled` | Enable Comet acceleration for `Lower` | true |
+| `spark.comet.expression.MakeDecimal.enabled` | Enable Comet acceleration for `MakeDecimal` | true |
+| `spark.comet.expression.MapEntries.enabled` | Enable Comet acceleration for `MapEntries` | true |
+| `spark.comet.expression.MapFromArrays.enabled` | Enable Comet acceleration for `MapFromArrays` | true |
+| `spark.comet.expression.MapKeys.enabled` | Enable Comet acceleration for `MapKeys` | true |
+| `spark.comet.expression.MapValues.enabled` | Enable Comet acceleration for `MapValues` | true |
+| `spark.comet.expression.Md5.enabled` | Enable Comet acceleration for `Md5` | true |
+| `spark.comet.expression.Minute.enabled` | Enable Comet acceleration for `Minute` | true |
+| `spark.comet.expression.MonotonicallyIncreasingID.enabled` | Enable Comet acceleration for `MonotonicallyIncreasingID` | true |
+| `spark.comet.expression.Month.enabled` | Enable Comet acceleration for `Month` | true |
+| `spark.comet.expression.Multiply.enabled` | Enable Comet acceleration for `Multiply` | true |
+| `spark.comet.expression.Murmur3Hash.enabled` | Enable Comet acceleration for `Murmur3Hash` | true |
+| `spark.comet.expression.Not.enabled` | Enable Comet acceleration for `Not` | true |
+| `spark.comet.expression.OctetLength.enabled` | Enable Comet acceleration for `OctetLength` | true |
+| `spark.comet.expression.Or.enabled` | Enable Comet acceleration for `Or` | true |
+| `spark.comet.expression.Pow.enabled` | Enable Comet acceleration for `Pow` | true |
+| `spark.comet.expression.Quarter.enabled` | Enable Comet acceleration for `Quarter` | true |
+| `spark.comet.expression.RLike.enabled` | Enable Comet acceleration for `RLike` | true |
+| `spark.comet.expression.Rand.enabled` | Enable Comet acceleration for `Rand` | true |
+| `spark.comet.expression.Randn.enabled` | Enable Comet acceleration for `Randn` | true |
+| `spark.comet.expression.RegExpReplace.enabled` | Enable Comet acceleration for `RegExpReplace` | true |
+| `spark.comet.expression.Remainder.enabled` | Enable Comet acceleration for `Remainder` | true |
+| `spark.comet.expression.Reverse.enabled` | Enable Comet acceleration for `Reverse` | true |
+| `spark.comet.expression.Round.enabled` | Enable Comet acceleration for `Round` | true |
+| `spark.comet.expression.ScalarSubquery.enabled` | Enable Comet acceleration for `ScalarSubquery` | true |
+| `spark.comet.expression.Second.enabled` | Enable Comet acceleration for `Second` | true |
+| `spark.comet.expression.Sha1.enabled` | Enable Comet acceleration for `Sha1` | true |
+| `spark.comet.expression.Sha2.enabled` | Enable Comet acceleration for `Sha2` | true |
+| `spark.comet.expression.ShiftLeft.enabled` | Enable Comet acceleration for `ShiftLeft` | true |
+| `spark.comet.expression.ShiftRight.enabled` | Enable Comet acceleration for `ShiftRight` | true |
+| `spark.comet.expression.Signum.enabled` | Enable Comet acceleration for `Signum` | true |
+| `spark.comet.expression.Sin.enabled` | Enable Comet acceleration for `Sin` | true |
+| `spark.comet.expression.Sinh.enabled` | Enable Comet acceleration for `Sinh` | true |
+| `spark.comet.expression.Size.enabled` | Enable Comet acceleration for `Size` | true |
+| `spark.comet.expression.SortOrder.enabled` | Enable Comet acceleration for `SortOrder` | true |
+| `spark.comet.expression.SparkPartitionID.enabled` | Enable Comet acceleration for `SparkPartitionID` | true |
+| `spark.comet.expression.Sqrt.enabled` | Enable Comet acceleration for `Sqrt` | true |
+| `spark.comet.expression.StartsWith.enabled` | Enable Comet acceleration for `StartsWith` | true |
+| `spark.comet.expression.StaticInvoke.enabled` | Enable Comet acceleration for `StaticInvoke` | true |
+| `spark.comet.expression.StringInstr.enabled` | Enable Comet acceleration for `StringInstr` | true |
+| `spark.comet.expression.StringLPad.enabled` | Enable Comet acceleration for `StringLPad` | true |
+| `spark.comet.expression.StringRPad.enabled` | Enable Comet acceleration for `StringRPad` | true |
+| `spark.comet.expression.StringRepeat.enabled` | Enable Comet acceleration for `StringRepeat` | true |
+| `spark.comet.expression.StringReplace.enabled` | Enable Comet acceleration for `StringReplace` | true |
+| `spark.comet.expression.StringSpace.enabled` | Enable Comet acceleration for `StringSpace` | true |
+| `spark.comet.expression.StringTranslate.enabled` | Enable Comet acceleration for `StringTranslate` | true |
+| `spark.comet.expression.StringTrim.enabled` | Enable Comet acceleration for `StringTrim` | true |
+| `spark.comet.expression.StringTrimBoth.enabled` | Enable Comet acceleration for `StringTrimBoth` | true |
+| `spark.comet.expression.StringTrimLeft.enabled` | Enable Comet acceleration for `StringTrimLeft` | true |
+| `spark.comet.expression.StringTrimRight.enabled` | Enable Comet acceleration for `StringTrimRight` | true |
+| `spark.comet.expression.StructsToJson.enabled` | Enable Comet acceleration for `StructsToJson` | true |
+| `spark.comet.expression.Substring.enabled` | Enable Comet acceleration for `Substring` | true |
+| `spark.comet.expression.Subtract.enabled` | Enable Comet acceleration for `Subtract` | true |
+| `spark.comet.expression.Tan.enabled` | Enable Comet acceleration for `Tan` | true |
+| `spark.comet.expression.Tanh.enabled` | Enable Comet acceleration for `Tanh` | true |
+| `spark.comet.expression.TruncDate.enabled` | Enable Comet acceleration for `TruncDate` | true |
+| `spark.comet.expression.TruncTimestamp.enabled` | Enable Comet acceleration for `TruncTimestamp` | true |
+| `spark.comet.expression.UnaryMinus.enabled` | Enable Comet acceleration for `UnaryMinus` | true |
+| `spark.comet.expression.Unhex.enabled` | Enable Comet acceleration for `Unhex` | true |
+| `spark.comet.expression.UnixDate.enabled` | Enable Comet acceleration for `UnixDate` | true |
+| `spark.comet.expression.UnixTimestamp.enabled` | Enable Comet acceleration for `UnixTimestamp` | true |
+| `spark.comet.expression.UnscaledValue.enabled` | Enable Comet acceleration for `UnscaledValue` | true |
+| `spark.comet.expression.Upper.enabled` | Enable Comet acceleration for `Upper` | true |
+| `spark.comet.expression.WeekDay.enabled` | Enable Comet acceleration for `WeekDay` | true |
+| `spark.comet.expression.WeekOfYear.enabled` | Enable Comet acceleration for `WeekOfYear` | true |
+| `spark.comet.expression.XxHash64.enabled` | Enable Comet acceleration for `XxHash64` | true |
+| `spark.comet.expression.Year.enabled` | Enable Comet acceleration for `Year` | true |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
 
 ## Enabling or Disabling Individual Aggregate Expressions
 
 <!--BEGIN:CONFIG_TABLE[enable_agg_expr]-->
+<!-- prettier-ignore-start -->
+| Config | Description | Default Value |
+|--------|-------------|---------------|
+| `spark.comet.expression.Average.enabled` | Enable Comet acceleration for `Average` | true |
+| `spark.comet.expression.BitAndAgg.enabled` | Enable Comet acceleration for `BitAndAgg` | true |
+| `spark.comet.expression.BitOrAgg.enabled` | Enable Comet acceleration for `BitOrAgg` | true |
+| `spark.comet.expression.BitXorAgg.enabled` | Enable Comet acceleration for `BitXorAgg` | true |
+| `spark.comet.expression.BloomFilterAggregate.enabled` | Enable Comet acceleration for `BloomFilterAggregate` | true |
+| `spark.comet.expression.Corr.enabled` | Enable Comet acceleration for `Corr` | true |
+| `spark.comet.expression.Count.enabled` | Enable Comet acceleration for `Count` | true |
+| `spark.comet.expression.CovPopulation.enabled` | Enable Comet acceleration for `CovPopulation` | true |
+| `spark.comet.expression.CovSample.enabled` | Enable Comet acceleration for `CovSample` | true |
+| `spark.comet.expression.First.enabled` | Enable Comet acceleration for `First` | true |
+| `spark.comet.expression.Last.enabled` | Enable Comet acceleration for `Last` | true |
+| `spark.comet.expression.Max.enabled` | Enable Comet acceleration for `Max` | true |
+| `spark.comet.expression.Min.enabled` | Enable Comet acceleration for `Min` | true |
+| `spark.comet.expression.StddevPop.enabled` | Enable Comet acceleration for `StddevPop` | true |
+| `spark.comet.expression.StddevSamp.enabled` | Enable Comet acceleration for `StddevSamp` | true |
+| `spark.comet.expression.Sum.enabled` | Enable Comet acceleration for `Sum` | true |
+| `spark.comet.expression.VariancePop.enabled` | Enable Comet acceleration for `VariancePop` | true |
+| `spark.comet.expression.VarianceSamp.enabled` | Enable Comet acceleration for `VarianceSamp` | true |
+<!-- prettier-ignore-end -->
 <!--END:CONFIG_TABLE-->
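
All of the tables above are generated reference material; at runtime the settings are passed like any other Spark conf. A minimal sketch of wiring a few of them into a session, assuming the Comet jar is on the driver and executor classpath (the app name and chosen values are illustrative):

```scala
import org.apache.spark.sql.SparkSession

// Illustrative sketch: applying a few of the configs documented above.
val spark = SparkSession.builder()
  .appName("comet-config-sketch")
  .config("spark.plugins", "org.apache.spark.CometPlugin")
  // Both must be enabled for native vectorized execution (see table notes).
  .config("spark.comet.enabled", "true")
  .config("spark.comet.exec.enabled", "true")
  // Native shuffle requires the Comet shuffle manager, set before startup.
  .config("spark.shuffle.manager",
    "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
  .config("spark.comet.exec.shuffle.enabled", "true")
  // Individual expressions can be switched off, per the tables above.
  .config("spark.comet.expression.Cast.enabled", "false")
  .getOrCreate()
```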
diff --git a/fuzz-testing/pom.xml b/fuzz-testing/pom.xml
index 35beddd2a..ba6d32278 100644
--- a/fuzz-testing/pom.xml
+++ b/fuzz-testing/pom.xml
@@ -25,7 +25,7 @@ under the License.
     <parent>
         <groupId>org.apache.datafusion</groupId>
         <artifactId>comet-parent-spark${spark.version.short}_${scala.binary.version}</artifactId>
-        <version>0.13.0-SNAPSHOT</version>
+        <version>0.13.0</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/native/Cargo.lock b/native/Cargo.lock
index 2e53b3c27..c7fd984a3 100644
--- a/native/Cargo.lock
+++ b/native/Cargo.lock
@@ -601,9 +601,9 @@ dependencies = [
 
 [[package]]
 name = "aws-lc-rs"
-version = "1.15.3"
+version = "1.15.4"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "e84ce723ab67259cfeb9877c6a639ee9eb7a27b28123abd71db7f0d5d0cc9d86"
+checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256"
 dependencies = [
  "aws-lc-sys",
  "zeroize",
@@ -611,9 +611,9 @@ dependencies = [
 
 [[package]]
 name = "aws-lc-sys"
-version = "0.36.0"
+version = "0.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "43a442ece363113bd4bd4c8b18977a7798dd4d3c3383f34fb61936960e8f4ad8"
+checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a"
 dependencies = [
  "cc",
  "cmake",
@@ -1242,9 +1242,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
 name = "cc"
-version = "1.2.53"
+version = "1.2.54"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932"
+checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583"
 dependencies = [
  "find-msvc-tools",
  "jobserver",
@@ -3711,9 +3711,9 @@ dependencies = [
 
 [[package]]
 name = "libm"
-version = "0.2.15"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
+checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981"
 
 [[package]]
 name = "libmimalloc-sys"
@@ -3892,9 +3892,9 @@ checksum = "dce6dd36094cac388f119d2e9dc82dc730ef91c32a6222170d630e5414b956e6"
 
 [[package]]
 name = "moka"
-version = "0.12.12"
+version = "0.12.13"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "a3dec6bd31b08944e08b58fd99373893a6c17054d6f3ea5006cc894f4f4eee2a"
+checksum = "b4ac832c50ced444ef6be0767a008b02c106a909ba79d1d830501e94b96f6b7e"
 dependencies = [
  "async-lock",
  "crossbeam-channel",
@@ -3979,9 +3979,9 @@ dependencies = [
 
 [[package]]
 name = "num-conv"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
 
 [[package]]
 name = "num-format"
@@ -4141,9 +4141,9 @@ dependencies = [
 
 [[package]]
 name = "openssl-probe"
-version = "0.2.0"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"
+checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
 
 [[package]]
 name = "ordered-float"
@@ -4514,9 +4514,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.105"
+version = "1.0.106"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7"
+checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
 dependencies = [
  "unicode-ident",
 ]
@@ -4708,9 +4708,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.43"
+version = "1.0.44"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a"
+checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4"
 dependencies = [
  "proc-macro2",
 ]
@@ -5492,9 +5492,9 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
 
 [[package]]
 name = "socket2"
-version = "0.6.1"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
+checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0"
 dependencies = [
  "libc",
  "windows-sys 0.60.2",
@@ -5759,9 +5759,9 @@ dependencies = [
 
 [[package]]
 name = "time"
-version = "0.3.45"
+version = "0.3.46"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd"
+checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5"
 dependencies = [
  "deranged",
  "itoa",
@@ -5774,15 +5774,15 @@ dependencies = [
 
 [[package]]
 name = "time-core"
-version = "0.1.7"
+version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca"
+checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca"
 
 [[package]]
 name = "time-macros"
-version = "0.2.25"
+version = "0.2.26"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd"
+checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4"
 dependencies = [
  "num-conv",
  "time-core",
@@ -6110,9 +6110,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
 
 [[package]]
 name = "uuid"
-version = "1.19.0"
+version = "1.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a"
+checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f"
 dependencies = [
  "getrandom 0.3.4",
  "js-sys",
@@ -6676,18 +6676,18 @@ dependencies = [
 
 [[package]]
 name = "zerocopy"
-version = "0.8.33"
+version = "0.8.34"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
+checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d"
 dependencies = [
  "zerocopy-derive",
 ]
 
 [[package]]
 name = "zerocopy-derive"
-version = "0.8.33"
+version = "0.8.34"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
+checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -6762,9 +6762,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"
 
 [[package]]
 name = "zmij"
-version = "1.0.16"
+version = "1.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"
+checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439"
 
 [[package]]
 name = "zstd"
diff --git a/pom.xml b/pom.xml
index ab42aa773..2d9df9852 100644
--- a/pom.xml
+++ b/pom.xml
@@ -30,7 +30,7 @@ under the License.
   </parent>
  <groupId>org.apache.datafusion</groupId>
  <artifactId>comet-parent-spark${spark.version.short}_${scala.binary.version}</artifactId>
-  <version>0.13.0-SNAPSHOT</version>
+  <version>0.13.0</version>
   <packaging>pom</packaging>
   <name>Comet Project Parent POM</name>
 
diff --git a/spark-integration/pom.xml b/spark-integration/pom.xml
index c1da41721..946387edf 100644
--- a/spark-integration/pom.xml
+++ b/spark-integration/pom.xml
@@ -26,7 +26,7 @@ under the License.
     <parent>
         <groupId>org.apache.datafusion</groupId>
         <artifactId>comet-parent-spark${spark.version.short}_${scala.binary.version}</artifactId>
-        <version>0.13.0-SNAPSHOT</version>
+        <version>0.13.0</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/spark/pom.xml b/spark/pom.xml
index a113e145f..27373042a 100644
--- a/spark/pom.xml
+++ b/spark/pom.xml
@@ -26,7 +26,7 @@ under the License.
   <parent>
     <groupId>org.apache.datafusion</groupId>
     <artifactId>comet-parent-spark${spark.version.short}_${scala.binary.version}</artifactId>
-    <version>0.13.0-SNAPSHOT</version>
+    <version>0.13.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -256,6 +256,34 @@ under the License.
         </dependency>
       </dependencies>
     </profile>
+    <profile>
+      <id>generate-docs</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>${exec-maven-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>generate-user-guide-reference-docs</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+                <configuration>
+                  <mainClass>org.apache.comet.GenerateDocs</mainClass>
+                  <arguments>
+                    <argument>${project.parent.basedir}/docs/source/user-guide/latest/</argument>
+                  </arguments>
+                  <classpathScope>compile</classpathScope>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 
   <build>

