vingov commented on a change in pull request #5125:
URL: https://github.com/apache/hudi/pull/5125#discussion_r839193909



##########
File path: 
hudi-sync/hudi-bigquery-sync/src/main/java/org/apache/hudi/bigquery/HoodieBigQueryClient.java
##########
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.bigquery;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.sync.common.AbstractSyncHoodieClient;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryException;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.CsvOptions;
+import com.google.cloud.bigquery.ExternalTableDefinition;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.FormatOptions;
+import com.google.cloud.bigquery.HivePartitioningOptions;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.ViewDefinition;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.parquet.schema.MessageType;
+
+import java.util.List;
+import java.util.Map;
+
+public class HoodieBigQueryClient extends AbstractSyncHoodieClient {
+  private static final Logger LOG = 
LogManager.getLogger(HoodieBigQueryClient.class);
+  private transient BigQuery bigquery;
+
+  public HoodieBigQueryClient(final BigQuerySyncConfig syncConfig, final 
FileSystem fs) {
+    super(syncConfig.basePath, syncConfig.assumeDatePartitioning, 
syncConfig.useFileListingFromMetadata,
+        false, fs);
+    this.createBigQueryConnection();
+  }
+
+  private void createBigQueryConnection() {
+    if (bigquery == null) {
+      try {
+        // Initialize client that will be used to send requests. This client 
only needs to be created
+        // once, and can be reused for multiple requests.
+        bigquery = BigQueryOptions.getDefaultInstance().getService();
+        LOG.info("Successfully established BigQuery connection.");
+      } catch (BigQueryException e) {
+        throw new HoodieException("Cannot create bigQuery connection ", e);
+      }
+    }
+  }
+
+  @Override
+  public void createTable(final String tableName, final MessageType 
storageSchema, final String inputFormatClass,
+                          final String outputFormatClass, final String 
serdeClass,
+                          final Map<String, String> serdeProperties, final 
Map<String, String> tableProperties) {
+    // bigQuery create table arguments are different, so do nothing.
+  }
+
+  public void createVersionsTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri, String sourceUriPrefix, List<String> partitionFields) {
+    try {
+      ExternalTableDefinition customTable;
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+
+      if (partitionFields != null) {
+        // Configuring partitioning options for partitioned table.
+        HivePartitioningOptions hivePartitioningOptions =
+            HivePartitioningOptions.newBuilder()
+                .setMode("AUTO")
+                .setRequirePartitionFilter(false)
+                .setSourceUriPrefix(sourceUriPrefix)
+                .build();
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setHivePartitioningOptions(hivePartitioningOptions)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      } else {
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      }
+
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("External table created using hivepartitioningoptions");
+    } catch (BigQueryException e) {
+      throw new HoodieException("External table was not created ", e);

Review comment:
       Good point — resolved.

##########
File path: 
hudi-sync/hudi-bigquery-sync/src/main/java/org/apache/hudi/bigquery/HoodieBigQueryClient.java
##########
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.bigquery;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.sync.common.AbstractSyncHoodieClient;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryException;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.CsvOptions;
+import com.google.cloud.bigquery.ExternalTableDefinition;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.FormatOptions;
+import com.google.cloud.bigquery.HivePartitioningOptions;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.ViewDefinition;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.parquet.schema.MessageType;
+
+import java.util.List;
+import java.util.Map;
+
+public class HoodieBigQueryClient extends AbstractSyncHoodieClient {
+  private static final Logger LOG = 
LogManager.getLogger(HoodieBigQueryClient.class);
+  private transient BigQuery bigquery;
+
+  public HoodieBigQueryClient(final BigQuerySyncConfig syncConfig, final 
FileSystem fs) {
+    super(syncConfig.basePath, syncConfig.assumeDatePartitioning, 
syncConfig.useFileListingFromMetadata,
+        false, fs);
+    this.createBigQueryConnection();
+  }
+
+  private void createBigQueryConnection() {
+    if (bigquery == null) {
+      try {
+        // Initialize client that will be used to send requests. This client 
only needs to be created
+        // once, and can be reused for multiple requests.
+        bigquery = BigQueryOptions.getDefaultInstance().getService();
+        LOG.info("Successfully established BigQuery connection.");
+      } catch (BigQueryException e) {
+        throw new HoodieException("Cannot create bigQuery connection ", e);
+      }
+    }
+  }
+
+  @Override
+  public void createTable(final String tableName, final MessageType 
storageSchema, final String inputFormatClass,
+                          final String outputFormatClass, final String 
serdeClass,
+                          final Map<String, String> serdeProperties, final 
Map<String, String> tableProperties) {
+    // bigQuery create table arguments are different, so do nothing.
+  }
+
+  public void createVersionsTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri, String sourceUriPrefix, List<String> partitionFields) {
+    try {
+      ExternalTableDefinition customTable;
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+
+      if (partitionFields != null) {
+        // Configuring partitioning options for partitioned table.
+        HivePartitioningOptions hivePartitioningOptions =
+            HivePartitioningOptions.newBuilder()
+                .setMode("AUTO")
+                .setRequirePartitionFilter(false)
+                .setSourceUriPrefix(sourceUriPrefix)
+                .build();
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setHivePartitioningOptions(hivePartitioningOptions)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      } else {
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      }
+
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("External table created using hivepartitioningoptions");
+    } catch (BigQueryException e) {
+      throw new HoodieException("External table was not created ", e);
+    }
+  }
+
+  public void createManifestTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri) {
+    try {
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+      CsvOptions csvOptions = CsvOptions.newBuilder()
+          .setFieldDelimiter(",")
+          .setAllowJaggedRows(false)
+          .setAllowQuotedNewLines(false)
+          .setSkipLeadingRows(0)
+          .build();
+      Schema schema = Schema.of(
+          Field.of("filename", StandardSQLTypeName.STRING));
+
+      ExternalTableDefinition customTable =
+          ExternalTableDefinition.newBuilder(sourceUri, schema, csvOptions)
+              .setAutodetect(false)
+              .setIgnoreUnknownValues(false)
+              .setMaxBadRecords(0)
+              .build();
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("Manifest External table created.");
+    } catch (BigQueryException e) {
+      throw new HoodieException("Manifest External table was not created ", e);
+    }
+  }
+
+  public void createSnapshotView(
+      String projectId, String datasetName, String viewName, String 
versionsTableName, String manifestTableName) {
+    try {
+      TableId tableId = TableId.of(projectId, datasetName, viewName);
+      String query =
+          String.format(
+              "SELECT * FROM `%s.%s.%s` WHERE _hoodie_file_name IN "
+                  + "(SELECT filename FROM `%s.%s.%s`)",
+              projectId,
+              datasetName,
+              versionsTableName,
+              projectId,
+              datasetName,
+              manifestTableName);
+
+      ViewDefinition viewDefinition =
+          ViewDefinition.newBuilder(query).setUseLegacySql(false).build();
+
+      bigquery.create(TableInfo.of(tableId, viewDefinition));
+      LOG.info("View created successfully");
+    } catch (BigQueryException e) {
+      throw new HoodieException("View was not created ", e);
+    }
+  }
+
+  @Override
+  public Map<String, String> getTableSchema(String tableName) {
+    // TODO: Implement automatic schema evolution when you add a new column.
+    return null;

Review comment:
       Good point — resolved.

##########
File path: 
hudi-sync/hudi-bigquery-sync/src/main/java/org/apache/hudi/bigquery/HoodieBigQueryClient.java
##########
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.bigquery;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.sync.common.AbstractSyncHoodieClient;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryException;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.CsvOptions;
+import com.google.cloud.bigquery.ExternalTableDefinition;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.FormatOptions;
+import com.google.cloud.bigquery.HivePartitioningOptions;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.ViewDefinition;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.parquet.schema.MessageType;
+
+import java.util.List;
+import java.util.Map;
+
+public class HoodieBigQueryClient extends AbstractSyncHoodieClient {
+  private static final Logger LOG = 
LogManager.getLogger(HoodieBigQueryClient.class);
+  private transient BigQuery bigquery;
+
+  public HoodieBigQueryClient(final BigQuerySyncConfig syncConfig, final 
FileSystem fs) {
+    super(syncConfig.basePath, syncConfig.assumeDatePartitioning, 
syncConfig.useFileListingFromMetadata,
+        false, fs);
+    this.createBigQueryConnection();
+  }
+
+  private void createBigQueryConnection() {
+    if (bigquery == null) {
+      try {
+        // Initialize client that will be used to send requests. This client 
only needs to be created
+        // once, and can be reused for multiple requests.
+        bigquery = BigQueryOptions.getDefaultInstance().getService();
+        LOG.info("Successfully established BigQuery connection.");
+      } catch (BigQueryException e) {
+        throw new HoodieException("Cannot create bigQuery connection ", e);
+      }
+    }
+  }
+
+  @Override
+  public void createTable(final String tableName, final MessageType 
storageSchema, final String inputFormatClass,
+                          final String outputFormatClass, final String 
serdeClass,
+                          final Map<String, String> serdeProperties, final 
Map<String, String> tableProperties) {
+    // bigQuery create table arguments are different, so do nothing.
+  }
+
+  public void createVersionsTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri, String sourceUriPrefix, List<String> partitionFields) {
+    try {
+      ExternalTableDefinition customTable;
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+
+      if (partitionFields != null) {
+        // Configuring partitioning options for partitioned table.
+        HivePartitioningOptions hivePartitioningOptions =
+            HivePartitioningOptions.newBuilder()
+                .setMode("AUTO")
+                .setRequirePartitionFilter(false)
+                .setSourceUriPrefix(sourceUriPrefix)
+                .build();
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setHivePartitioningOptions(hivePartitioningOptions)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      } else {
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      }
+
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("External table created using hivepartitioningoptions");
+    } catch (BigQueryException e) {
+      throw new HoodieException("External table was not created ", e);
+    }
+  }
+
+  public void createManifestTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri) {
+    try {
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+      CsvOptions csvOptions = CsvOptions.newBuilder()
+          .setFieldDelimiter(",")
+          .setAllowJaggedRows(false)
+          .setAllowQuotedNewLines(false)
+          .setSkipLeadingRows(0)
+          .build();
+      Schema schema = Schema.of(
+          Field.of("filename", StandardSQLTypeName.STRING));
+
+      ExternalTableDefinition customTable =
+          ExternalTableDefinition.newBuilder(sourceUri, schema, csvOptions)
+              .setAutodetect(false)
+              .setIgnoreUnknownValues(false)
+              .setMaxBadRecords(0)
+              .build();
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("Manifest External table created.");
+    } catch (BigQueryException e) {
+      throw new HoodieException("Manifest External table was not created ", e);

Review comment:
       Resolved.

##########
File path: 
hudi-sync/hudi-bigquery-sync/src/main/java/org/apache/hudi/bigquery/HoodieBigQueryClient.java
##########
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.bigquery;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.sync.common.AbstractSyncHoodieClient;
+
+import com.google.cloud.bigquery.BigQuery;
+import com.google.cloud.bigquery.BigQueryException;
+import com.google.cloud.bigquery.BigQueryOptions;
+import com.google.cloud.bigquery.CsvOptions;
+import com.google.cloud.bigquery.ExternalTableDefinition;
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.FormatOptions;
+import com.google.cloud.bigquery.HivePartitioningOptions;
+import com.google.cloud.bigquery.Schema;
+import com.google.cloud.bigquery.StandardSQLTypeName;
+import com.google.cloud.bigquery.TableId;
+import com.google.cloud.bigquery.TableInfo;
+import com.google.cloud.bigquery.ViewDefinition;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.parquet.schema.MessageType;
+
+import java.util.List;
+import java.util.Map;
+
+public class HoodieBigQueryClient extends AbstractSyncHoodieClient {
+  private static final Logger LOG = 
LogManager.getLogger(HoodieBigQueryClient.class);
+  private transient BigQuery bigquery;
+
+  public HoodieBigQueryClient(final BigQuerySyncConfig syncConfig, final 
FileSystem fs) {
+    super(syncConfig.basePath, syncConfig.assumeDatePartitioning, 
syncConfig.useFileListingFromMetadata,
+        false, fs);
+    this.createBigQueryConnection();
+  }
+
+  private void createBigQueryConnection() {
+    if (bigquery == null) {
+      try {
+        // Initialize client that will be used to send requests. This client 
only needs to be created
+        // once, and can be reused for multiple requests.
+        bigquery = BigQueryOptions.getDefaultInstance().getService();
+        LOG.info("Successfully established BigQuery connection.");
+      } catch (BigQueryException e) {
+        throw new HoodieException("Cannot create bigQuery connection ", e);
+      }
+    }
+  }
+
+  @Override
+  public void createTable(final String tableName, final MessageType 
storageSchema, final String inputFormatClass,
+                          final String outputFormatClass, final String 
serdeClass,
+                          final Map<String, String> serdeProperties, final 
Map<String, String> tableProperties) {
+    // bigQuery create table arguments are different, so do nothing.
+  }
+
+  public void createVersionsTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri, String sourceUriPrefix, List<String> partitionFields) {
+    try {
+      ExternalTableDefinition customTable;
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+
+      if (partitionFields != null) {
+        // Configuring partitioning options for partitioned table.
+        HivePartitioningOptions hivePartitioningOptions =
+            HivePartitioningOptions.newBuilder()
+                .setMode("AUTO")
+                .setRequirePartitionFilter(false)
+                .setSourceUriPrefix(sourceUriPrefix)
+                .build();
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setHivePartitioningOptions(hivePartitioningOptions)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      } else {
+        customTable =
+            ExternalTableDefinition.newBuilder(sourceUri, 
FormatOptions.parquet())
+                .setAutodetect(true)
+                .setIgnoreUnknownValues(true)
+                .setMaxBadRecords(0)
+                .build();
+      }
+
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("External table created using hivepartitioningoptions");
+    } catch (BigQueryException e) {
+      throw new HoodieException("External table was not created ", e);
+    }
+  }
+
+  public void createManifestTable(
+      String projectId, String datasetName, String tableName, String 
sourceUri) {
+    try {
+      TableId tableId = TableId.of(projectId, datasetName, tableName);
+      CsvOptions csvOptions = CsvOptions.newBuilder()
+          .setFieldDelimiter(",")
+          .setAllowJaggedRows(false)
+          .setAllowQuotedNewLines(false)
+          .setSkipLeadingRows(0)
+          .build();
+      Schema schema = Schema.of(
+          Field.of("filename", StandardSQLTypeName.STRING));
+
+      ExternalTableDefinition customTable =
+          ExternalTableDefinition.newBuilder(sourceUri, schema, csvOptions)
+              .setAutodetect(false)
+              .setIgnoreUnknownValues(false)
+              .setMaxBadRecords(0)
+              .build();
+      bigquery.create(TableInfo.of(tableId, customTable));
+      LOG.info("Manifest External table created.");
+    } catch (BigQueryException e) {
+      throw new HoodieException("Manifest External table was not created ", e);
+    }
+  }
+
+  public void createSnapshotView(
+      String projectId, String datasetName, String viewName, String 
versionsTableName, String manifestTableName) {
+    try {
+      TableId tableId = TableId.of(projectId, datasetName, viewName);
+      String query =
+          String.format(
+              "SELECT * FROM `%s.%s.%s` WHERE _hoodie_file_name IN "
+                  + "(SELECT filename FROM `%s.%s.%s`)",
+              projectId,
+              datasetName,
+              versionsTableName,
+              projectId,
+              datasetName,
+              manifestTableName);
+
+      ViewDefinition viewDefinition =
+          ViewDefinition.newBuilder(query).setUseLegacySql(false).build();
+
+      bigquery.create(TableInfo.of(tableId, viewDefinition));
+      LOG.info("View created successfully");
+    } catch (BigQueryException e) {
+      throw new HoodieException("View was not created ", e);
+    }
+  }
+
+  @Override
+  public Map<String, String> getTableSchema(String tableName) {
+    // TODO: Implement automatic schema evolution when you add a new column.
+    return null;
+  }
+
+  @Override
+  public void addPartitionsToTable(final String tableName, final List<String> 
partitionsToAdd) {
+    // bigQuery discovers the new partitions automatically, so do nothing.
+  }
+
+  @Override
+  public void dropPartitionsToTable(final String tableName, final List<String> 
partitionsToDrop) {
+    // bigQuery discovers the new partitions automatically, so do nothing.
+  }
+
+  @Override
+  public boolean doesTableExist(final String tableName) {
+    // bigQuery table exists needs different set of arguments, so do nothing.
+    throw new UnsupportedOperationException("Not support doesTableExist yet.");
+  }
+
+  public boolean doesTableExist(final String projectId, final String 
datasetName, final String tableName) {
+    TableId tableId = TableId.of(projectId, datasetName, tableName);
+    return bigquery.getTable(tableId, BigQuery.TableOption.fields()) != null;
+  }
+
+  public boolean doesViewExist(final String projectId, final String 
datasetName, final String viewName) {
+    TableId tableId = TableId.of(projectId, datasetName, viewName);
+    return bigquery.getTable(tableId) != null;
+  }
+
+  @Override
+  public Option<String> getLastCommitTimeSynced(final String tableName) {
+    // bigQuery doesn't support tblproperties, so do nothing.
+    throw new UnsupportedOperationException("Not support 
getLastCommitTimeSynced yet.");
+  }
+
+  @Override
+  public void updateLastCommitTimeSynced(final String tableName) {
+    // bigQuery doesn't support tblproperties, so do nothing.

Review comment:
       Good point — resolved.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to