This is an automated email from the ASF dual-hosted git repository.
alamb pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/datafusion.git
The following commit(s) were added to refs/heads/main by this push:
new 161d0f2b53 fix dml logical plan output schema (#10394)
161d0f2b53 is described below
commit 161d0f2b53f50aa81c5887080ccc4c1c4c847e70
Author: Leonardo Yvens <[email protected]>
AuthorDate: Tue May 7 18:24:38 2024 +0100
fix dml logical plan output schema (#10394)
* fix dml logical plan output schema
Previously, `LogicalPlan::schema` would return the
input schema for Dml plans rather than the expected
output schema. This is an unusual case, since DML
statements are typically not run for their output;
when they are, the output is conventionally the
`count` of rows affected by the DML statement.
See `fn dml_output_schema` for a test, and the
sketch after the change list below.
* document DmlStatement::new
* Fix expected logical schema of 'insert into' in sqllogictests
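
For illustration, a minimal sketch of the user-visible change. It is hedged:
it assumes the `datafusion` crate at this commit plus a `tokio` runtime, and
it mirrors the new `dml_output_schema` test below rather than demonstrating
any additional API. Planning an INSERT now reports the DML output schema, a
single non-nullable `count: UInt64` column, instead of the target table's
schema:

    use arrow::datatypes::{DataType, Field, Schema};
    use datafusion::prelude::SessionContext;

    #[tokio::main]
    async fn main() -> datafusion::error::Result<()> {
        let ctx = SessionContext::new();
        ctx.sql("CREATE TABLE test (x int)").await?;

        // Before this fix, df.schema() was the table's input schema;
        // it is now the DML output schema: one `count` row-count column.
        let df = ctx.sql("INSERT INTO test VALUES (1)").await?;
        let expected = Schema::new(vec![Field::new("count", DataType::UInt64, false)]);
        assert_eq!(Schema::from(df.schema()), expected);
        Ok(())
    }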
---
datafusion/core/tests/sql/sql_api.rs | 13 +++++++
datafusion/expr/src/logical_plan/builder.rs | 8 ++--
datafusion/expr/src/logical_plan/dml.rs | 29 ++++++++++++++
datafusion/expr/src/logical_plan/plan.rs | 14 +++----
datafusion/expr/src/logical_plan/tree_node.rs | 2 +
datafusion/sql/src/statement.rs | 28 +++++++-------
datafusion/sqllogictest/test_files/aggregate.slt | 2 +-
datafusion/sqllogictest/test_files/array.slt | 2 +-
.../test_files/create_external_table.slt | 6 +--
datafusion/sqllogictest/test_files/insert.slt | 28 +++++++-------
.../sqllogictest/test_files/insert_to_external.slt | 44 +++++++++++-----------
datafusion/sqllogictest/test_files/math.slt | 12 +++---
12 files changed, 116 insertions(+), 72 deletions(-)
diff --git a/datafusion/core/tests/sql/sql_api.rs b/datafusion/core/tests/sql/sql_api.rs
index b3a819fbc3..4a6424fc24 100644
--- a/datafusion/core/tests/sql/sql_api.rs
+++ b/datafusion/core/tests/sql/sql_api.rs
@@ -58,6 +58,19 @@ async fn unsupported_dml_returns_error() {
ctx.sql_with_options(sql, options).await.unwrap();
}
+#[tokio::test]
+async fn dml_output_schema() {
+ use arrow::datatypes::Schema;
+ use arrow::datatypes::{DataType, Field};
+
+ let ctx = SessionContext::new();
+ ctx.sql("CREATE TABLE test (x int)").await.unwrap();
+ let sql = "INSERT INTO test VALUES (1)";
+ let df = ctx.sql(sql).await.unwrap();
+ let count_schema = Schema::new(vec![Field::new("count", DataType::UInt64, false)]);
+ assert_eq!(Schema::from(df.schema()), count_schema);
+}
+
#[tokio::test]
async fn unsupported_copy_returns_error() {
let tmpdir = TempDir::new().unwrap();
diff --git a/datafusion/expr/src/logical_plan/builder.rs b/datafusion/expr/src/logical_plan/builder.rs
index 43873cb90c..7b1e449801 100644
--- a/datafusion/expr/src/logical_plan/builder.rs
+++ b/datafusion/expr/src/logical_plan/builder.rs
@@ -296,12 +296,12 @@ impl LogicalPlanBuilder {
WriteOp::InsertInto
};
- Ok(Self::from(LogicalPlan::Dml(DmlStatement {
- table_name: table_name.into(),
+ Ok(Self::from(LogicalPlan::Dml(DmlStatement::new(
+ table_name.into(),
table_schema,
op,
- input: Arc::new(input),
- })))
+ Arc::new(input),
+ ))))
}
/// Convert a table provider into a builder with a TableScan
diff --git a/datafusion/expr/src/logical_plan/dml.rs b/datafusion/expr/src/logical_plan/dml.rs
index 9c0fe0f304..13f3759ab8 100644
--- a/datafusion/expr/src/logical_plan/dml.rs
+++ b/datafusion/expr/src/logical_plan/dml.rs
@@ -20,6 +20,7 @@ use std::fmt::{self, Display};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
+use arrow::datatypes::{DataType, Field, Schema};
use datafusion_common::config::FormatOptions;
use datafusion_common::{DFSchemaRef, TableReference};
@@ -70,9 +71,29 @@ pub struct DmlStatement {
pub op: WriteOp,
/// The relation that determines the tuples to add/remove/modify the schema must match with table_schema
pub input: Arc<LogicalPlan>,
+ /// The schema of the output relation
+ pub output_schema: DFSchemaRef,
}
impl DmlStatement {
+ /// Creates a new DML statement with the output schema set to a single `count` column.
+ pub fn new(
+ table_name: TableReference,
+ table_schema: DFSchemaRef,
+ op: WriteOp,
+ input: Arc<LogicalPlan>,
+ ) -> Self {
+ Self {
+ table_name,
+ table_schema,
+ op,
+ input,
+
+ // The output schema is always a single column with the number of rows affected
+ output_schema: make_count_schema(),
+ }
+ }
+
/// Return a descriptive name of this [`DmlStatement`]
pub fn name(&self) -> &str {
self.op.name()
@@ -106,3 +127,11 @@ impl Display for WriteOp {
write!(f, "{}", self.name())
}
}
+
+fn make_count_schema() -> DFSchemaRef {
+ Arc::new(
+ Schema::new(vec![Field::new("count", DataType::UInt64, false)])
+ .try_into()
+ .unwrap(),
+ )
+}
diff --git a/datafusion/expr/src/logical_plan/plan.rs b/datafusion/expr/src/logical_plan/plan.rs
index 23f5280377..c608b51e08 100644
--- a/datafusion/expr/src/logical_plan/plan.rs
+++ b/datafusion/expr/src/logical_plan/plan.rs
@@ -191,7 +191,7 @@ impl LogicalPlan {
LogicalPlan::DescribeTable(DescribeTable { output_schema, .. }) => {
output_schema
}
- LogicalPlan::Dml(DmlStatement { table_schema, .. }) => table_schema,
+ LogicalPlan::Dml(DmlStatement { output_schema, .. }) => output_schema,
LogicalPlan::Copy(CopyTo { input, .. }) => input.schema(),
LogicalPlan::Ddl(ddl) => ddl.schema(),
LogicalPlan::Unnest(Unnest { schema, .. }) => schema,
@@ -509,12 +509,12 @@ impl LogicalPlan {
table_schema,
op,
..
- }) => Ok(LogicalPlan::Dml(DmlStatement {
- table_name: table_name.clone(),
- table_schema: table_schema.clone(),
- op: op.clone(),
- input: Arc::new(inputs.swap_remove(0)),
- })),
+ }) => Ok(LogicalPlan::Dml(DmlStatement::new(
+ table_name.clone(),
+ table_schema.clone(),
+ op.clone(),
+ Arc::new(inputs.swap_remove(0)),
+ ))),
LogicalPlan::Copy(CopyTo {
input: _,
output_url,
diff --git a/datafusion/expr/src/logical_plan/tree_node.rs b/datafusion/expr/src/logical_plan/tree_node.rs
index 37a36c36ca..2289eb1639 100644
--- a/datafusion/expr/src/logical_plan/tree_node.rs
+++ b/datafusion/expr/src/logical_plan/tree_node.rs
@@ -242,12 +242,14 @@ impl TreeNode for LogicalPlan {
table_schema,
op,
input,
+ output_schema,
}) => rewrite_arc(input, f)?.update_data(|input| {
LogicalPlan::Dml(DmlStatement {
table_name,
table_schema,
op,
input,
+ output_schema,
})
}),
LogicalPlan::Copy(CopyTo {
diff --git a/datafusion/sql/src/statement.rs b/datafusion/sql/src/statement.rs
index 137bb5fb20..aee21497b1 100644
--- a/datafusion/sql/src/statement.rs
+++ b/datafusion/sql/src/statement.rs
@@ -1206,12 +1206,12 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
}
};
- let plan = LogicalPlan::Dml(DmlStatement {
- table_name: table_ref,
- table_schema: schema.into(),
- op: WriteOp::Delete,
- input: Arc::new(source),
- });
+ let plan = LogicalPlan::Dml(DmlStatement::new(
+ table_ref,
+ schema.into(),
+ WriteOp::Delete,
+ Arc::new(source),
+ ));
Ok(plan)
}
@@ -1318,12 +1318,12 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
let source = project(source, exprs)?;
- let plan = LogicalPlan::Dml(DmlStatement {
+ let plan = LogicalPlan::Dml(DmlStatement::new(
table_name,
table_schema,
- op: WriteOp::Update,
- input: Arc::new(source),
- });
+ WriteOp::Update,
+ Arc::new(source),
+ ));
Ok(plan)
}
@@ -1441,12 +1441,12 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
WriteOp::InsertInto
};
- let plan = LogicalPlan::Dml(DmlStatement {
+ let plan = LogicalPlan::Dml(DmlStatement::new(
table_name,
- table_schema: Arc::new(table_schema),
+ Arc::new(table_schema),
op,
- input: Arc::new(source),
- });
+ Arc::new(source),
+ ));
Ok(plan)
}
diff --git a/datafusion/sqllogictest/test_files/aggregate.slt b/datafusion/sqllogictest/test_files/aggregate.slt
index ed9ea3f1dc..bc677b73fb 100644
--- a/datafusion/sqllogictest/test_files/aggregate.slt
+++ b/datafusion/sqllogictest/test_files/aggregate.slt
@@ -3477,7 +3477,7 @@ SELECT STRING_AGG(column1, '|') FROM (values (''), (null), (''));
statement ok
CREATE TABLE strings(g INTEGER, x VARCHAR, y VARCHAR)
-query ITT
+query I
INSERT INTO strings VALUES (1,'a','/'), (1,'b','-'), (2,'i','/'), (2,NULL,'-'), (2,'j','+'), (3,'p','/'), (4,'x','/'), (4,'y','-'), (4,'z','+')
----
9
diff --git a/datafusion/sqllogictest/test_files/array.slt b/datafusion/sqllogictest/test_files/array.slt
index 3b90187f07..eaec0f4d8d 100644
--- a/datafusion/sqllogictest/test_files/array.slt
+++ b/datafusion/sqllogictest/test_files/array.slt
@@ -6475,7 +6475,7 @@ create table test_create_array_table(
d int
);
-query ???I
+query I
insert into test_create_array_table values
([1, 2, 3], ['a', 'b', 'c'], [[4,6], [6,7,8]], 1);
----
diff --git a/datafusion/sqllogictest/test_files/create_external_table.slt b/datafusion/sqllogictest/test_files/create_external_table.slt
index 8aeeb06c19..9f1fc523f5 100644
--- a/datafusion/sqllogictest/test_files/create_external_table.slt
+++ b/datafusion/sqllogictest/test_files/create_external_table.slt
@@ -130,7 +130,7 @@ PARTITIONED BY (p1 string, p2 string)
STORED AS parquet
LOCATION 'test_files/scratch/create_external_table/bad_partitioning/';
-query ITT
+query I
INSERT INTO partitioned VALUES (1, 'x', 'y');
----
1
@@ -186,13 +186,13 @@ PARTITIONED BY (month string, year string)
STORED AS parquet
LOCATION 'test_files/scratch/create_external_table/manual_partitioning/';
-query TTT
+query I
-- creates year -> month partitions
INSERT INTO test VALUES('name', '2024', '03');
----
1
-query TTT
+query I
-- creates month -> year partitions.
-- now table have both partitions (year -> month and month -> year)
INSERT INTO test2 VALUES('name', '2024', '03');
diff --git a/datafusion/sqllogictest/test_files/insert.slt b/datafusion/sqllogictest/test_files/insert.slt
index 40e3908123..7c9bc4abe7 100644
--- a/datafusion/sqllogictest/test_files/insert.slt
+++ b/datafusion/sqllogictest/test_files/insert.slt
@@ -75,7 +75,7 @@ physical_plan
09)----------------RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1
10)------------------CsvExec: file_groups={1 group: [[WORKSPACE_ROOT/testing/data/csv/aggregate_test_100.csv]]}, projection=[c1, c4, c9], has_header=true
-query II
+query I
INSERT INTO table_without_values SELECT
SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING),
COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
@@ -137,7 +137,7 @@ physical_plan
-query II
+query I
INSERT INTO table_without_values SELECT
SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a1,
COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a2
@@ -187,7 +187,7 @@ physical_plan
10)------------------CsvExec: file_groups={1 group: [[WORKSPACE_ROOT/testing/data/csv/aggregate_test_100.csv]]}, projection=[c1, c4, c9], has_header=true
-query II
+query I
INSERT INTO table_without_values SELECT
SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a1,
COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a2
@@ -221,7 +221,7 @@ physical_plan
02)--SortExec: expr=[c1@0 ASC NULLS LAST], preserve_partitioning=[false]
03)----CsvExec: file_groups={1 group: [[WORKSPACE_ROOT/testing/data/csv/aggregate_test_100.csv]]}, projection=[c1], has_header=true
-query T
+query I
insert into table_without_values select c1 from aggregate_test_100 order by c1;
----
100
@@ -239,12 +239,12 @@ drop table table_without_values;
statement ok
CREATE TABLE table_without_values(id BIGINT, name varchar);
-query IT
+query I
insert into table_without_values(id, name) values(1, 'foo');
----
1
-query IT
+query I
insert into table_without_values(name, id) values('bar', 2);
----
1
@@ -259,7 +259,7 @@ statement error Error during planning: Column count doesn't match insert query!
insert into table_without_values(id) values(4, 'zoo');
# insert NULL values for the missing column (name)
-query IT
+query I
insert into table_without_values(id) values(4);
----
1
@@ -279,18 +279,18 @@ drop table table_without_values;
statement ok
CREATE TABLE table_without_values(field1 BIGINT NOT NULL, field2 BIGINT NULL);
-query II
+query I
insert into table_without_values values(1, 100);
----
1
-query II
+query I
insert into table_without_values values(2, NULL);
----
1
# insert NULL values for the missing column (field2)
-query II
+query I
insert into table_without_values(field1) values(3);
----
1
@@ -363,7 +363,7 @@ create table test_column_defaults(
e timestamp default now()
)
-query IIITP
+query I
insert into test_column_defaults values(1, 10, 100, 'ABC', now())
----
1
@@ -371,7 +371,7 @@ insert into test_column_defaults values(1, 10, 100, 'ABC', now())
statement error DataFusion error: Execution error: Invalid batch column at '1' has null but schema specifies non-nullable
insert into test_column_defaults(a) values(2)
-query IIITP
+query I
insert into test_column_defaults(b) values(20)
----
1
@@ -383,7 +383,7 @@ select a,b,c,d from test_column_defaults
NULL 20 500 default_text
# fill the timestamp column with default value `now()` again, it should be different from the previous one
-query IIITP
+query I
insert into test_column_defaults(a, b, c, d) values(2, 20, 200, 'DEF')
----
1
@@ -417,7 +417,7 @@ create table test_column_defaults(
e timestamp default now()
) as values(1, 10, 100, 'ABC', now())
-query IIITP
+query I
insert into test_column_defaults(b) values(20)
----
1
diff --git a/datafusion/sqllogictest/test_files/insert_to_external.slt b/datafusion/sqllogictest/test_files/insert_to_external.slt
index 9a2a239a85..70eb2b75a7 100644
--- a/datafusion/sqllogictest/test_files/insert_to_external.slt
+++ b/datafusion/sqllogictest/test_files/insert_to_external.slt
@@ -60,7 +60,7 @@ STORED AS parquet
LOCATION 'test_files/scratch/insert_to_external/parquet_types_partitioned/'
PARTITIONED BY (b);
-query TT
+query I
insert into dictionary_encoded_parquet_partitioned
select * from dictionary_encoded_values
----
@@ -81,7 +81,7 @@ STORED AS arrow
LOCATION 'test_files/scratch/insert_to_external/arrow_dict_partitioned/'
PARTITIONED BY (b);
-query TT
+query I
insert into dictionary_encoded_arrow_partitioned
select * from dictionary_encoded_values
----
@@ -130,7 +130,7 @@ physical_plan
03)----ProjectionExec: expr=[column1@0 as a, column2@1 as b]
04)------ValuesExec
-query II
+query I
INSERT INTO ordered_insert_test values (5, 1), (4, 2), (7,7), (7,8), (7,9), (7,10), (3, 3), (2, 4), (1, 5);
----
9
@@ -158,7 +158,7 @@ LOCATION 'test_files/scratch/insert_to_external/insert_to_partitioned/'
PARTITIONED BY (a, b);
#note that partitioned cols are moved to the end so value tuples are (c, a, b)
-query ITT
+query I
INSERT INTO partitioned_insert_test values (1, 10, 100), (1, 10, 200), (1, 20, 100), (1, 20, 200), (2, 20, 100), (2, 20, 200);
----
6
@@ -192,7 +192,7 @@ STORED AS csv
LOCATION 'test_files/scratch/insert_to_external/insert_to_partitioned'
PARTITIONED BY (a string, b string);
-query ITT
+query I
INSERT INTO partitioned_insert_test_hive VALUES (3,30,300);
----
1
@@ -216,7 +216,7 @@ STORED AS json
LOCATION 'test_files/scratch/insert_to_external/insert_to_partitioned_json/'
PARTITIONED BY (a);
-query TT
+query I
INSERT INTO partitioned_insert_test_json values (1, 2), (3, 4), (5, 6), (1, 2), (3, 4), (5, 6);
----
6
@@ -250,7 +250,7 @@ STORED AS parquet
LOCATION 'test_files/scratch/insert_to_external/insert_to_partitioned_pq/'
PARTITIONED BY (a);
-query IT
+query I
INSERT INTO partitioned_insert_test_pq values (1, 2), (3, 4), (5, 6), (1, 2), (3, 4), (5, 6);
----
6
@@ -296,12 +296,12 @@ single_file_test(a bigint, b bigint)
STORED AS csv
LOCATION 'test_files/scratch/insert_to_external/single_csv_table.csv';
-query II
+query I
INSERT INTO single_file_test values (1, 2), (3, 4);
----
2
-query II
+query I
INSERT INTO single_file_test values (4, 5), (6, 7);
----
2
@@ -320,7 +320,7 @@ directory_test(a bigint, b bigint)
STORED AS parquet
LOCATION 'test_files/scratch/insert_to_external/external_parquet_table_q0/';
-query II
+query I
INSERT INTO directory_test values (1, 2), (3, 4);
----
2
@@ -364,7 +364,7 @@ physical_plan
09)----------------RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1
10)------------------CsvExec: file_groups={1 group: [[WORKSPACE_ROOT/testing/data/csv/aggregate_test_100.csv]]}, projection=[c1, c4, c9], has_header=true
-query II
+query I
INSERT INTO table_without_values SELECT
SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING),
COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
@@ -427,7 +427,7 @@ physical_plan
-query II
+query I
INSERT INTO table_without_values SELECT
SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a1,
COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as a2
@@ -462,7 +462,7 @@ physical_plan
02)--SortExec: expr=[c1@0 ASC NULLS LAST], preserve_partitioning=[false]
03)----CsvExec: file_groups={1 group: [[WORKSPACE_ROOT/testing/data/csv/aggregate_test_100.csv]]}, projection=[c1], has_header=true
-query T
+query I
insert into table_without_values select c1 from aggregate_test_100 order by c1;
----
100
@@ -484,12 +484,12 @@ table_without_values(id BIGINT, name varchar)
STORED AS parquet
LOCATION 'test_files/scratch/insert_to_external/external_parquet_table_q4/';
-query IT
+query I
insert into table_without_values(id, name) values(1, 'foo');
----
1
-query IT
+query I
insert into table_without_values(name, id) values('bar', 2);
----
1
@@ -504,7 +504,7 @@ statement error Error during planning: Column count doesn't match insert query!
insert into table_without_values(id) values(4, 'zoo');
# insert NULL values for the missing column (name)
-query IT
+query I
insert into table_without_values(id) values(4);
----
1
@@ -526,18 +526,18 @@ table_without_values(field1 BIGINT NOT NULL, field2 BIGINT NULL)
STORED AS parquet
LOCATION 'test_files/scratch/insert_to_external/external_parquet_table_q5/';
-query II
+query I
insert into table_without_values values(1, 100);
----
1
-query II
+query I
insert into table_without_values values(2, NULL);
----
1
# insert NULL values for the missing column (field2)
-query II
+query I
insert into table_without_values(field1) values(3);
----
1
@@ -576,7 +576,7 @@ CREATE EXTERNAL TABLE test_column_defaults(
LOCATION 'test_files/scratch/insert_to_external/external_parquet_table_q6/';
# fill in all column values
-query IIITP
+query I
insert into test_column_defaults values(1, 10, 100, 'ABC', now())
----
1
@@ -584,7 +584,7 @@ insert into test_column_defaults values(1, 10, 100, 'ABC', now())
statement error DataFusion error: Execution error: Invalid batch column at '1' has null but schema specifies non-nullable
insert into test_column_defaults(a) values(2)
-query IIITP
+query I
insert into test_column_defaults(b) values(20)
----
1
@@ -596,7 +596,7 @@ select a,b,c,d from test_column_defaults
NULL 20 500 default_text
# fill the timestamp column with default value `now()` again, it should be different from the previous one
-query IIITP
+query I
insert into test_column_defaults(a, b, c, d) values(2, 20, 200, 'DEF')
----
1
diff --git a/datafusion/sqllogictest/test_files/math.slt b/datafusion/sqllogictest/test_files/math.slt
index 5f3e1dd9ee..802323ca45 100644
--- a/datafusion/sqllogictest/test_files/math.slt
+++ b/datafusion/sqllogictest/test_files/math.slt
@@ -142,12 +142,12 @@ CREATE TABLE test_nullable_integer(
(0, 0, 0, 0, 0, 0, 0, 0, 'zeros'),
(1, 1, 1, 1, 1, 1, 1, 1, 'ones');
-query IIIIIIIIT
+query I
INSERT into test_nullable_integer values(-128, -32768, -2147483648, -9223372036854775808, 0, 0, 0, 0, 'mins');
----
1
-query IIIIIIIIT
+query I
INSERT into test_nullable_integer values(127, 32767, 2147483647, 9223372036854775807, 255, 65535, 4294967295, 18446744073709551615, 'maxs');
----
1
@@ -283,7 +283,7 @@ CREATE TABLE test_non_nullable_integer(
c8 BIGINT UNSIGNED NOT NULL,
);
-query IIIIIIII
+query I
INSERT INTO test_non_nullable_integer VALUES(1, 1, 1, 1, 1, 1, 1, 1)
----
1
@@ -418,7 +418,7 @@ CREATE TABLE test_non_nullable_float(
c2 double NOT NULL,
);
-query RR
+query I
INSERT INTO test_non_nullable_float VALUES
(-1.0, -1.0),
(1.0, 1.0),
@@ -473,7 +473,7 @@ CREATE TABLE test_nullable_decimal(
(0, 0, 0, 0),
(NULL, NULL, NULL, NULL);
-query RRRR
+query I
INSERT into test_nullable_decimal values
(
-99999999.99,
@@ -546,7 +546,7 @@ drop table test_nullable_decimal
statement ok
CREATE TABLE test_non_nullable_decimal(c1 DECIMAL(9,2) NOT NULL);
-query R
+query I
INSERT INTO test_non_nullable_decimal VALUES(1)
----
1
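
Usage note: a minimal sketch of the new constructor, assuming `table_name`,
`table_schema`, `op`, and `input` are already in scope with the types shown
in the diff above. `DmlStatement::new` fills in the `count` output schema
automatically, whereas building the struct literally would leave
`output_schema` to be set by hand:

    // Types per the diff: TableReference, DFSchemaRef, WriteOp, Arc<LogicalPlan>.
    let plan = LogicalPlan::Dml(DmlStatement::new(
        table_name,   // target table
        table_schema, // schema the input rows must match
        op,           // e.g. WriteOp::InsertInto or WriteOp::Delete
        input,        // plan producing the rows to write
    ));
    // plan.schema() now yields the single non-nullable `count: UInt64` column.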
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]