lidavidm commented on code in PR #13166:
URL: https://github.com/apache/arrow/pull/13166#discussion_r875103930


##########
java/adapter/jdbc/src/test/java/org/apache/arrow/adapter/jdbc/h2/JdbcToArrowTest.java:
##########
@@ -311,4 +320,146 @@ public void testMockDataTypes() throws SQLException {
     assertEquals("1", element.getString());
   }
 
+  @Test
+  public void testUnreliableMetaDataPrecisionAndScale() throws Exception {
+    BufferAllocator allocator = new RootAllocator(Integer.MAX_VALUE);
+    int x = 0;
+    final int targetRows = 0;
+    ResultSet rs = buildIncorrectPrecisionAndScaleMetaDataResultSet();
+    ResultSetMetaData rsmd = rs.getMetaData();
+    assertEquals("Column type should be Types.DECIMAL", Types.DECIMAL, 
rsmd.getColumnType(1));
+    assertEquals("Column scale should be zero", 0, rsmd.getScale(1));
+    assertEquals("Column precision should be zero", 0, rsmd.getPrecision(1));
+    rs.next();
+    BigDecimal bd1 = rs.getBigDecimal(1);
+    assertEquals("Value should be 1000000000000000.01", new 
BigDecimal("1000000000000000.01"), bd1);
+    assertEquals("Value scale should be 2", 2, bd1.scale());
+    assertEquals("Value precision should be 18", 18, bd1.precision());
+    assertFalse("No more rows!", rs.next());
+
+    // reset the ResultSet:
+    rs.beforeFirst();
+    JdbcToArrowConfig config = new JdbcToArrowConfigBuilder(
+          allocator, JdbcToArrowUtils.getUtcCalendar(), /* include metadata */ false)
+          .setReuseVectorSchemaRoot(reuseVectorSchemaRoot)
+          .build();
+    try {
+      ArrowVectorIterator iter = JdbcToArrow.sqlToArrowVectorIterator(rs, config);
+      while (iter.hasNext()) {
+        iter.next();
+      }
+      fail("Expected to fail due to mismatched metadata!");
+      iter.close();
+    } catch (Exception ex) {
+      // expected to fail
+    }
+
+    // reset the ResultSet:
+    rs.beforeFirst();
+    JdbcFieldInfo explicitMappingField = new JdbcFieldInfo(Types.DECIMAL, 18, 2);
+    Map<Integer, JdbcFieldInfo> explicitMapping = new HashMap<>();
+    explicitMapping.put(1, explicitMappingField);
+    config = new JdbcToArrowConfigBuilder(
+            allocator, JdbcToArrowUtils.getUtcCalendar(), /* include metadata */ false)
+            .setReuseVectorSchemaRoot(reuseVectorSchemaRoot)
+            .setExplicitTypesByColumnIndex(explicitMapping)
+            .build();
+
+    try {
+      ArrowVectorIterator iter = JdbcToArrow.sqlToArrowVectorIterator(rs, config);
+      while (iter.hasNext()) {
+        iter.next();
+      }
+      iter.close();
+    } catch (Exception ex) {
+      fail("Should not fail with explicit metadata supplied!");
+    }
+
+  }
+
+  @Test
+  public void testInconsistentPrecisionAndScale() throws Exception {
+    BufferAllocator allocator = new RootAllocator(Integer.MAX_VALUE);
+    int x = 0;
+    final int targetRows = 0;
+    ResultSet rs = buildVaryingPrecisionAndScaleResultSet();
+    ResultSetMetaData rsmd = rs.getMetaData();
+    assertEquals("Column type should be Types.DECIMAL", Types.DECIMAL, 
rsmd.getColumnType(1));
+    assertEquals("Column scale should be zero", 0, rsmd.getScale(1));
+    assertEquals("Column precision should be zero", 0, rsmd.getPrecision(1));
+    rs.next();
+    BigDecimal bd1 = rs.getBigDecimal(1);
+    assertEquals("Value should be 1000000000000000.01", new 
BigDecimal("1000000000000000.01"), bd1);
+    assertEquals("Value scale should be 2", 2, bd1.scale());
+    assertEquals("Value precision should be 18", 18, bd1.precision());
+    rs.next();
+    BigDecimal bd2 = rs.getBigDecimal(1);
+    assertEquals("Value should be 1000000000300.0000001", new 
BigDecimal("1000000000300.0000001"), bd2);
+    assertEquals("Value scale should be 7", 7, bd2.scale());
+    assertEquals("Value precision should be 20", 20, bd2.precision());
+    rs.beforeFirst();
+    JdbcFieldInfo explicitMappingField = new JdbcFieldInfo(Types.DECIMAL, 20, 7);
+    Map<Integer, JdbcFieldInfo> explicitMapping = new HashMap<>();
+    explicitMapping.put(1, explicitMappingField);
+
+    JdbcToArrowConfig config = new JdbcToArrowConfigBuilder(
+            allocator, JdbcToArrowUtils.getUtcCalendar(), /* include metadata */ false)
+            .setReuseVectorSchemaRoot(reuseVectorSchemaRoot)
+            .setExplicitTypesByColumnIndex(explicitMapping)
+            .build();
+    try {
+      ArrowVectorIterator iter = JdbcToArrow.sqlToArrowVectorIterator(rs, config);
+      while (iter.hasNext()) {
+        iter.next();
+      }
+      iter.close();
+    } catch (Exception ex) {
+      // Fails here due to ARROW-16427:
+      // fail("Failed to process ResultSet");

Review Comment:
   If we don't expect this to fail anymore, should we remove the try-catch (or uncomment the fail) instead of swallowing exceptions?
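
   For illustration, one possible shape of that suggestion (a sketch only, reusing `rs`, `config`, and the Arrow JDBC classes from the test above; not code from the PR):

   ```java
   // Minimal sketch of the suggestion above (not code from the PR): let any
   // exception propagate and fail the test, and close the iterator via
   // try-with-resources (assuming ArrowVectorIterator is AutoCloseable, as its
   // explicit close() call above suggests).
   try (ArrowVectorIterator iter = JdbcToArrow.sqlToArrowVectorIterator(rs, config)) {
     while (iter.hasNext()) {
       iter.next();
     }
   }
   // Alternatively, keep the try-catch and re-enable the assertion:
   //   } catch (Exception ex) {
   //     fail("Failed to process ResultSet: " + ex);
   //   }
   ```

   Either way the test stops silently swallowing unexpected exceptions.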



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
