This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion.git


The following commit(s) were added to refs/heads/main by this push:
     new dfad0afc34 refactor: change file type logic for create table (#7477)
dfad0afc34 is described below

commit dfad0afc34516cbe816484e2b414cc5942eac9a7
Author: Trent Hauck <[email protected]>
AuthorDate: Wed Sep 6 09:39:39 2023 -0700

    refactor: change file type logic for create table (#7477)
    
    * refactor: change file type logic for create table
    
    * style: run rust fmt
---
 datafusion/sql/src/statement.rs         | 7 ++++---
 datafusion/sql/tests/sql_integration.rs | 5 ++++-
 docs/source/user-guide/sql/ddl.md       | 2 +-
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/datafusion/sql/src/statement.rs b/datafusion/sql/src/statement.rs
index 0b0c391134..435cbdf0b2 100644
--- a/datafusion/sql/src/statement.rs
+++ b/datafusion/sql/src/statement.rs
@@ -681,11 +681,12 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
             options,
         } = statement;
 
-        if file_type != "CSV"
-            && file_type != "JSON"
+        if (file_type == "PARQUET" || file_type == "AVRO" || file_type == "ARROW")
             && file_compression_type != CompressionTypeVariant::UNCOMPRESSED
         {
-            plan_err!("File compression type can be specified for CSV/JSON files.")?;
+            plan_err!(
+                "File compression type cannot be set for PARQUET, AVRO, or ARROW files."
+            )?;
         }
 
         let schema = self.build_schema(columns)?;
diff --git a/datafusion/sql/tests/sql_integration.rs b/datafusion/sql/tests/sql_integration.rs
index 07112184bf..154bd3f9a0 100644
--- a/datafusion/sql/tests/sql_integration.rs
+++ b/datafusion/sql/tests/sql_integration.rs
@@ -1850,6 +1850,7 @@ fn create_external_table_with_compression_type() {
             "CREATE EXTERNAL TABLE t(c1 int) STORED AS CSV COMPRESSION TYPE BZIP2 LOCATION 'foo.csv.bz2'",
             "CREATE EXTERNAL TABLE t(c1 int) STORED AS JSON COMPRESSION TYPE GZIP LOCATION 'foo.json.gz'",
             "CREATE EXTERNAL TABLE t(c1 int) STORED AS JSON COMPRESSION TYPE BZIP2 LOCATION 'foo.json.bz2'",
+            "CREATE EXTERNAL TABLE t(c1 int) STORED AS NONSTANDARD COMPRESSION TYPE GZIP LOCATION 'foo.unk'",
         ];
     for sql in sqls {
         let expected = "CreateExternalTable: Bare { table: \"t\" }";
@@ -1862,11 +1863,13 @@ fn create_external_table_with_compression_type() {
         "CREATE EXTERNAL TABLE t STORED AS AVRO COMPRESSION TYPE BZIP2 LOCATION 'foo.avro'",
         "CREATE EXTERNAL TABLE t STORED AS PARQUET COMPRESSION TYPE GZIP LOCATION 'foo.parquet'",
         "CREATE EXTERNAL TABLE t STORED AS PARQUET COMPRESSION TYPE BZIP2 LOCATION 'foo.parquet'",
+        "CREATE EXTERNAL TABLE t STORED AS ARROW COMPRESSION TYPE GZIP LOCATION 'foo.arrow'",
+        "CREATE EXTERNAL TABLE t STORED AS ARROW COMPRESSION TYPE BZIP2 LOCATION 'foo.arrow'",
     ];
     for sql in sqls {
         let err = logical_plan(sql).expect_err("query should have failed");
         assert_eq!(
-            "Plan(\"File compression type can be specified for CSV/JSON files.\")",
+            "Plan(\"File compression type cannot be set for PARQUET, AVRO, or ARROW files.\")",
             format!("{err:?}")
         );
     }
diff --git a/docs/source/user-guide/sql/ddl.md b/docs/source/user-guide/sql/ddl.md
index f566b8342e..751159c305 100644
--- a/docs/source/user-guide/sql/ddl.md
+++ b/docs/source/user-guide/sql/ddl.md
@@ -79,7 +79,7 @@ LOCATION <literal>
 
 `file_type` is one of `CSV`, `PARQUET`, `AVRO` or `JSON`
 
-`LOCATION <literal>` specfies the location to find the data. It can be
+`LOCATION <literal>` specifies the location to find the data. It can be
 a path to a file or directory of partitioned files locally or on an
 object store.
 

Reply via email to