derrickaw commented on code in PR #35567:
URL: https://github.com/apache/beam/pull/35567#discussion_r2209090858


##########
sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java:
##########
@@ -1149,22 +1157,61 @@ <T> long insertAll(
          // If this row's encoding by itself is larger than the maximum row payload, then it's
          // impossible to insert into BigQuery, and so we send it out through the dead-letter
          // queue.
-          if (nextRowSize >= MAX_BQ_ROW_PAYLOAD) {
+          if (nextRowSize >= MAX_BQ_ROW_PAYLOAD_BYTES) {
             InsertErrors error =
                 new InsertErrors()
                    .setErrors(ImmutableList.of(new ErrorProto().setReason("row-too-large")));
            // We verify whether the retryPolicy parameter expects us to retry. If it does, then
             // it will return true. Otherwise it will return false.
-            Boolean isRetry = retryPolicy.shouldRetry(new InsertRetryPolicy.Context(error));
-            if (isRetry) {
+            if (retryPolicy.shouldRetry(new InsertRetryPolicy.Context(error))) {
+              // Obtain table schema
+              TableSchema tableSchema = null;
+              try {
+                String tableSpec = BigQueryHelpers.toTableSpec(ref);
+                if (tableSchemaCache.containsKey(tableSpec)) {
+                  tableSchema = tableSchemaCache.get(tableSpec);
+                } else {
+                  Table table = getTable(ref);
+                  if (table != null) {
+                    table.getSchema();
+                    tableSchema =
+                        TableRowToStorageApiProto.schemaToProtoTableSchema(table.getSchema());
+                    tableSchemaCache.put(tableSpec, tableSchema);
+                  }
+                }
+              } catch (Exception e) {
+                LOG.warn("Could not fetch table schema for {}.", ref, e);
+              }
+
+              // Create BigQuery schema map to use for formatting
+              String rowDetails;
+              try {
+                if (tableSchema != null) {
+                  // Creates bqSchemaMap containing field name, field type, and
+                  // possibly field mode if available.
+                  Map<String, String> bqSchemaMap =
+                      tableSchema.getFieldsList().stream()
+                          .collect(Collectors.toMap(f -> f.getName(), f -> f.getType().name()));
+                  rowDetails = formatRowWithSchema(row, bqSchemaMap);
+                } else {
+                  rowDetails = formatRowWithoutSchema(row);
+                }
+              } catch (Exception e) {
+                rowDetails = row.keySet().toString();
+              }
+              if (rowDetails.length() > 1024) {
+                rowDetails = rowDetails.substring(0, 1024) + "...}";
+              }
+
               throw new RuntimeException(
                   String.format(
-                      "We have observed a row that is %s bytes in size and 
exceeded BigQueryIO"
-                          + " limit of 9MB. While BigQuery supports request 
sizes up to 10MB,"
-                          + " BigQueryIO sets the limit at 9MB to leave room 
for request"
-                          + " overhead. You may change your retry strategy to 
unblock this"
-                          + " pipeline, and the row will be output as a failed 
insert.",
-                      nextRowSize));
+                      "We have observed a row of size %s bytes exceeding the "
+                          + "BigQueryIO limit of %s. This is probably due to a 
schema "
+                          + "mismatch. Problematic row schema "

Review Comment:
   It's true :). See the other comment. Thanks.
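
   For anyone following along, here is a minimal sketch (not the PR's actual implementation) of how the pieces in this hunk fit together: the name-to-type map built from `TableSchema.getFieldsList()` feeds a formatter that prints the expected BigQuery type next to the observed Java value class for each field, and the result is truncated before being interpolated into the exception message. The helper names below are hypothetical stand-ins for the `formatRowWithSchema` / `formatRowWithoutSchema` methods the PR adds elsewhere.

   ```java
   import com.google.api.services.bigquery.model.TableRow;
   import java.util.Map;
   import java.util.stream.Collectors;

   class OversizedRowDetailsSketch {

     // Mirrors the 1024-character truncation applied to rowDetails in the diff.
     private static final int MAX_ROW_DETAILS_LENGTH = 1024;

     // Hypothetical stand-in for formatRowWithSchema: prints each field as
     // name=EXPECTED_BQ_TYPE:actualJavaClass so a mismatch (e.g. a String value
     // destined for an INT64 column) is visible in the error message.
     static String formatRowWithSchema(TableRow row, Map<String, String> bqSchemaMap) {
       return row.entrySet().stream()
           .map(
               e -> {
                 String expectedType = bqSchemaMap.getOrDefault(e.getKey(), "UNKNOWN");
                 String actualClass =
                     e.getValue() == null ? "null" : e.getValue().getClass().getSimpleName();
                 return e.getKey() + "=" + expectedType + ":" + actualClass;
               })
           .collect(Collectors.joining(", ", "{", "}"));
     }

     // Hypothetical fallback when the table schema could not be fetched: field names only.
     static String formatRowWithoutSchema(TableRow row) {
       return row.keySet().toString();
     }

     // Truncation step, matching the substring(0, 1024) + "...}" logic in the diff.
     static String truncate(String rowDetails) {
       return rowDetails.length() > MAX_ROW_DETAILS_LENGTH
           ? rowDetails.substring(0, MAX_ROW_DETAILS_LENGTH) + "...}"
           : rowDetails;
     }
   }
   ```

   Usage would be roughly `truncate(formatRowWithSchema(row, bqSchemaMap))`, with the truncated string ending up in the "Problematic row schema" portion of the new error message; the PR's real formatting may differ.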


