WillAyd commented on code in PR #1288:
URL: https://github.com/apache/arrow-adbc/pull/1288#discussion_r1410007258


##########
c/validation/adbc_validation.cc:
##########
@@ -1528,6 +1528,199 @@ void StatementTest::TestSqlIngestFloat64() {
   ASSERT_NO_FATAL_FAILURE(TestSqlIngestNumericType<double>(NANOARROW_TYPE_DOUBLE));
 }
 
+// For full coverage, ensure that this contains Decimal examples that:
+// - Have >= four zeroes to the left of the decimal point
+// - Have >= four zeroes to the right of the decimal point
+// - Have >= four trailing zeroes to the right of the decimal point
+// - Have >= four leading zeroes before the first digit to the right of the decimal point
+// - Include at least one value that is < 0 (negative)
+// Note that the Arrow Decimal implementations do not support the special values NaN and ±inf
+void StatementTest::TestSqlIngestDecimal128() {
+  if (!quirks()->supports_bulk_ingest(ADBC_INGEST_OPTION_MODE_CREATE)) {
+    GTEST_SKIP();
+  }
+
+  ASSERT_THAT(quirks()->DropTable(&connection, "bulk_ingest", &error),
+              IsOkStatus(&error));
+
+  Handle<struct ArrowSchema> schema;
+  Handle<struct ArrowArray> array;
+  struct ArrowError na_error;
+  constexpr int32_t size = 128;
+  constexpr enum ArrowType type = NANOARROW_TYPE_DECIMAL128;
+
+  struct ArrowDecimal decimal1;
+  struct ArrowDecimal decimal2;
+  struct ArrowDecimal decimal3;
+  struct ArrowDecimal decimal4;
+  struct ArrowDecimal decimal5;
+  struct ArrowDecimal decimal6;
+
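+  // With precision 38 and scale 8, the raw integers below decode as:
+  //   decimal1 = -123.456, decimal2 = 0.00001234, decimal3 = 1,
+  //   decimal4 = 123.456,  decimal5 = 1000000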
+  ArrowDecimalInit(&decimal1, size, 38, 8);
+  ArrowDecimalSetInt(&decimal1, -12345600000);
+  ArrowDecimalInit(&decimal2, size, 38, 8);
+  ArrowDecimalSetInt(&decimal2, 1234);
+  ArrowDecimalInit(&decimal3, size, 38, 8);
+  ArrowDecimalSetInt(&decimal3, 100000000);
+  ArrowDecimalInit(&decimal4, size, 38, 8);
+  ArrowDecimalSetInt(&decimal4, 12345600000);
+  ArrowDecimalInit(&decimal5, size, 38, 8);
+  ArrowDecimalSetInt(&decimal5, 100000000000000);
+  ArrowDecimalInit(&decimal6, size, 38, 8);
+  // 23423942305922323.49023094, i.e. the unscaled integer 2342394230592232349023094
+  // at scale 8, encoded as 16 little-endian bytes
+  uint8_t le_data[16] = {
+    0x76, 0xbb, 0xc8, 0x2c, 0x1c, 0x2b, 0x18, 0x72,
+    0x05, 0xf0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+  ArrowDecimalSetBytes(&decimal6, le_data);
+
+  const std::vector<std::optional<ArrowDecimal*>> values = {
+    std::nullopt, &decimal1, &decimal2, &decimal3, &decimal4, &decimal5, &decimal6};
+
+  ASSERT_THAT(MakeSchema(&schema.value, {{"col", type}}), IsOkErrno());
+  ASSERT_THAT(MakeBatch<ArrowDecimal*>(&schema.value, &array.value,
+                                       &na_error, values), IsOkErrno());
+
+  ASSERT_THAT(AdbcStatementNew(&connection, &statement, &error), IsOkStatus(&error));
+  ASSERT_THAT(AdbcStatementSetOption(&statement, ADBC_INGEST_OPTION_TARGET_TABLE,
+                                     "bulk_ingest", &error),
+              IsOkStatus(&error));
+  ASSERT_THAT(AdbcStatementBind(&statement, &array.value, &schema.value, &error),
+              IsOkStatus(&error));
+
+  int64_t rows_affected = 0;
+  ASSERT_THAT(AdbcStatementExecuteQuery(&statement, nullptr, &rows_affected, &error),
+              IsOkStatus(&error));
+  ASSERT_THAT(rows_affected,
+              ::testing::AnyOf(::testing::Eq(values.size()), ::testing::Eq(-1)));
+
+  ASSERT_THAT(AdbcStatementSetSqlQuery(
+                  &statement,
+                  "SELECT * FROM bulk_ingest ORDER BY \"col\" ASC NULLS FIRST", &error),
+              IsOkStatus(&error));
+  {
+    StreamReader reader;
+    ASSERT_THAT(AdbcStatementExecuteQuery(&statement, &reader.stream.value,
+                                          &reader.rows_affected, &error),
+                IsOkStatus(&error));
+    ASSERT_THAT(reader.rows_affected,
+                ::testing::AnyOf(::testing::Eq(values.size()), ::testing::Eq(-1)));
+
+    ASSERT_NO_FATAL_FAILURE(reader.GetSchema());
+    ArrowType round_trip_type = quirks()->IngestSelectRoundTripType(type);
+    ASSERT_NO_FATAL_FAILURE(
+        CompareSchema(&reader.schema.value, {{"col", round_trip_type, NULLABLE}}));
+
+    ASSERT_NO_FATAL_FAILURE(reader.Next());
+    ASSERT_NE(nullptr, reader.array->release);
+    ASSERT_EQ(values.size(), reader.array->length);
+    ASSERT_EQ(1, reader.array->n_children);
+
+    const std::vector<std::optional<std::string>> str_values = {
+      std::nullopt, "-123.456", "0.00001234", "1",  "123.456", "1000000",
+    "23423942305922323.49023094"};
+    ASSERT_NO_FATAL_FAILURE(
+            CompareArray<std::string>(reader.array_view->children[0], str_values));
+
+    ASSERT_NO_FATAL_FAILURE(reader.Next());
+    ASSERT_EQ(nullptr, reader.array->release);
+  }
+  ASSERT_THAT(AdbcStatementRelease(&statement, &error), IsOkStatus(&error));
+}
+
+void StatementTest::TestSqlIngestDecimal256() {

Review Comment:
   Ideally we could create fixtures parameterized on bit width and the test data; this is pretty much copy/paste of the Decimal128 test above, just with a larger precision and larger values. A rough sketch of what a shared helper might look like is below.
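
   For illustration only (not something this PR has to adopt): a minimal sketch of a shared helper parameterized on bit width, assuming the nanoarrow public header; the names DecimalCase and MakeDecimalValues are hypothetical.

   ```cpp
   // Hypothetical sketch: one helper builds the decimal test values for either
   // bit width, so the Decimal128 and Decimal256 tests differ only in the case
   // they pass in. DecimalCase / MakeDecimalValues are illustrative names only.
   #include <cstddef>
   #include <cstdint>
   #include <vector>

   #include <nanoarrow/nanoarrow.h>

   struct DecimalCase {
     enum ArrowType type;  // NANOARROW_TYPE_DECIMAL128 or NANOARROW_TYPE_DECIMAL256
     int32_t bitwidth;     // 128 or 256
     int32_t precision;
     int32_t scale;
   };

   // The string expectations ("-123.456", "0.00001234", ...) stay identical for
   // both bit widths because the same raw integers are used at the same scale.
   inline std::vector<struct ArrowDecimal> MakeDecimalValues(const DecimalCase& c) {
     constexpr int64_t kRaw[] = {-12345600000, 1234, 100000000, 12345600000,
                                 100000000000000};
     std::vector<struct ArrowDecimal> out(sizeof(kRaw) / sizeof(kRaw[0]));
     for (std::size_t i = 0; i < out.size(); ++i) {
       ArrowDecimalInit(&out[i], c.bitwidth, c.precision, c.scale);
       ArrowDecimalSetInt(&out[i], kRaw[i]);
     }
     return out;
   }
   ```

   The Decimal128 test would then call MakeDecimalValues({NANOARROW_TYPE_DECIMAL128, 128, 38, 8}) and the Decimal256 test its 256-bit equivalent, with any larger-precision cases added on top.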



##########
c/validation/adbc_validation_util.cc:
##########
@@ -141,7 +141,25 @@ int MakeSchema(struct ArrowSchema* schema, const std::vector<SchemaField>& field
   CHECK_ERRNO(ArrowSchemaSetTypeStruct(schema, fields.size()));
   size_t i = 0;
   for (const SchemaField& field : fields) {
-    CHECK_ERRNO(ArrowSchemaSetType(schema->children[i], field.type));
+    switch (field.type) {
+    case NANOARROW_TYPE_DECIMAL128:
+      // TODO: how can we avoid hard-coding precision and scale?

Review Comment:
   We still need to find a better way to specify precision/scale than hard-coding it here, but I think that will depend on the larger test design. One possible direction is sketched below.
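
   Purely as a hedged illustration (the real SchemaField in adbc_validation_util.h may end up looking different), one option is to let the field description carry precision/scale and route decimal types through nanoarrow's ArrowSchemaSetTypeDecimal:

   ```cpp
   // Illustrative only: the extra precision/scale members and the helper name
   // SetChildType are hypothetical, not part of this PR.
   #include <cstdint>
   #include <string>

   #include <nanoarrow/nanoarrow.h>

   struct SchemaFieldSketch {
     std::string name;
     enum ArrowType type;
     int32_t precision = 0;  // only meaningful for decimal types
     int32_t scale = 0;
   };

   // Decimal types take precision/scale from the field description instead of
   // hard-coded constants; everything else falls through to ArrowSchemaSetType.
   inline ArrowErrorCode SetChildType(struct ArrowSchema* child,
                                      const SchemaFieldSketch& field) {
     switch (field.type) {
       case NANOARROW_TYPE_DECIMAL128:
       case NANOARROW_TYPE_DECIMAL256:
         return ArrowSchemaSetTypeDecimal(child, field.type, field.precision,
                                          field.scale);
       default:
         return ArrowSchemaSetType(child, field.type);
     }
   }
   ```

   MakeSchema's loop would then call SetChildType(schema->children[i], field) for each field, and the decimal tests could choose whatever precision/scale they need per column.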



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
