taherk77 commented on a change in pull request #969: [HUDI-251] JDBC
incremental load to HUDI DeltaStreamer
URL: https://github.com/apache/incubator-hudi/pull/969#discussion_r341938401
##########
File path:
hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JDBCSource.java
##########
@@ -0,0 +1,235 @@
+package org.apache.hudi.utilities.sources;
+
+import java.util.Arrays;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hudi.DataSourceUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.TypedProperties;
+import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.utilities.schema.SchemaProvider;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.DataFrameReader;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.functions;
+import org.apache.spark.sql.types.DataTypes;
+import org.jetbrains.annotations.NotNull;
+
+
+public class JDBCSource extends RowSource {
+
+  private static final Logger LOG = LogManager.getLogger(JDBCSource.class);
+
+  public JDBCSource(TypedProperties props, JavaSparkContext sparkContext, SparkSession sparkSession,
+      SchemaProvider schemaProvider) {
+    super(props, sparkContext, sparkSession, schemaProvider);
+  }
+
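+  // Builds the Spark JDBC DataFrameReader from the configured connection properties,
+  // resolving the password from either an inline property or a password file on the DFS.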
+  private static DataFrameReader validatePropsAndGetDataFrameReader(final SparkSession session,
+      final TypedProperties properties) throws HoodieException {
+    DataFrameReader dataFrameReader = null;
+    FSDataInputStream passwordFileStream = null;
+    try {
+      dataFrameReader = session.read().format("jdbc");
+      dataFrameReader = dataFrameReader.option(Config.URL_PROP, properties.getString(Config.URL));
+      dataFrameReader = dataFrameReader.option(Config.USER_PROP, properties.getString(Config.USER));
+      dataFrameReader = dataFrameReader.option(Config.DRIVER_PROP, properties.getString(Config.DRIVER_CLASS));
+      dataFrameReader = dataFrameReader.option(Config.RDBMS_TABLE_PROP, properties.getString(Config.RDBMS_TABLE_NAME));
+
+      if (properties.containsKey(Config.PASSWORD) && !StringUtils.isNullOrEmpty(properties.getString(Config.PASSWORD))) {
+        LOG.info("Reading JDBC password from properties file....");
+        dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP, properties.getString(Config.PASSWORD));
+      } else if (properties.containsKey(Config.PASSWORD_FILE) && !StringUtils.isNullOrEmpty(properties.getString(Config.PASSWORD_FILE))) {
+        LOG.info(String.format("Reading JDBC password from password file %s", properties.getString(Config.PASSWORD_FILE)));
+        FileSystem fileSystem = FileSystem.get(new Configuration());
+        Path passwordFile = new Path(properties.getString(Config.PASSWORD_FILE));
+        // Size the buffer from the file status and read it fully; InputStream.available()
+        // is not a reliable length, and a single read() may not fill the buffer.
+        byte[] bytes = new byte[(int) fileSystem.getFileStatus(passwordFile).getLen()];
+        passwordFileStream = fileSystem.open(passwordFile);
+        passwordFileStream.readFully(bytes);
+        dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP, new String(bytes));
+      } else {
+        throw new IllegalArgumentException(String.format("JDBCSource needs either a %s or %s to connect to the RDBMS "
+            + "datasource", Config.PASSWORD_FILE, Config.PASSWORD));
+      }
+
+      addExtraJdbcOptions(properties, dataFrameReader);
+
+      // Incremental mode requires the checkpoint column to be configured.
+      if (properties.containsKey(Config.IS_INCREMENTAL) && !StringUtils.isNullOrEmpty(properties.getString(Config.IS_INCREMENTAL))) {
+        DataSourceUtils.checkRequiredProperties(properties, Arrays.asList(Config.INCREMENTAL_COLUMN));
+      }
+      return dataFrameReader;
+    } catch (Exception e) {
+      throw new HoodieException(e);
+    } finally {
+      IOUtils.closeStream(passwordFileStream);
+    }
+  }
+
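+  // Forwards any property whose key starts with Config.EXTRA_OPTIONS to the JDBC reader,
+  // using the last dot-separated token of the key as the Spark option name. For example
+  // (hypothetical prefix), "...extra.options.fetchsize=1000" becomes option("fetchsize", "1000").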
+  private static void addExtraJdbcOptions(TypedProperties properties, DataFrameReader dataFrameReader) {
+    Set<Object> objects = properties.keySet();
+    for (Object property : objects) {
+      String prop = (String) property;
+      if (prop.startsWith(Config.EXTRA_OPTIONS)) {
+        String[] split = prop.split("\\.");
+        String key = split[split.length - 1];
+        String value = properties.getString(prop);
+        LOG.info(String.format("Adding %s -> %s to jdbc options", key, value));
+        dataFrameReader.option(key, value);
+      }
+    }
+  }
+
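+  // Invoked by DeltaStreamer with the last checkpoint (if any) and a source limit;
+  // returns the next batch of rows together with the new checkpoint string.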
+  @Override
+  protected Pair<Option<Dataset<Row>>, String> fetchNextBatch(Option<String> lastCkptStr, long sourceLimit) {
Review comment:
> Limiting might be helpful to break down the load into smaller chunks.
> DBMSes don't usually like large scans... so having some ability to limit
> would actually be good.
>
> @taherk77 how about having the ability to add a `LIMIT` clause depending on
> the JDBC endpoint? It should tell you if it's MySQL or Postgres (those two
> are very popular anyway, so having this working even for those two initially
> would be awesome).
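
For illustration, the suggestion could look roughly like the sketch below
(buildLimitedTableQuery is a hypothetical helper name; Config.RDBMS_TABLE_PROP
and Config.RDBMS_TABLE_NAME are the option keys already used in this PR).
Wrapping the table in a subquery lets Spark's JDBC reader push the LIMIT down
to the database, and both MySQL and Postgres accept this syntax:

    // Hypothetical sketch: push a LIMIT down to MySQL/Postgres by swapping the
    // plain table name for a subquery in the JDBC table option.
    private static DataFrameReader buildLimitedTableQuery(DataFrameReader reader,
        TypedProperties props, long sourceLimit) {
      String table = props.getString(Config.RDBMS_TABLE_NAME);
      if (sourceLimit > 0) {
        String limited = String.format("(SELECT * FROM %s LIMIT %d) limited_tbl", table, sourceLimit);
        return reader.option(Config.RDBMS_TABLE_PROP, limited);
      }
      return reader.option(Config.RDBMS_TABLE_PROP, table);
    }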
Hi @vinothchandar, so do you mean that if the user sets the limit to 10, for
Postgres and MySQL we should do `select * from table limit 10`?
I don't think that would work with the type of semantics we have here.
In continuous mode with full scans, JDBCSource scans the whole table on every interval.
In incremental mode we first do a full scan and write a checkpoint; we then assume
the column given for incremental is either a long, an int, or a timestamp. If the
incremental query fails, we fall back to a full scan. How would a limit work here?
It would always keep fetching the same records.
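
To make those semantics concrete, the incremental fetch could look roughly like
this (a sketch only; tableName, incrementalColumn, and lastCheckpoint are
placeholder variables, not names from this PR):

    // Hypothetical sketch of the incremental semantics: fetch only rows whose
    // incremental column has moved past the last checkpoint. A bare LIMIT with
    // no such predicate would keep returning the same head rows every batch.
    String incrementalQuery = String.format("(SELECT * FROM %s WHERE %s > '%s') incr_tbl",
        tableName, incrementalColumn, lastCheckpoint);
    Dataset<Row> rows = dataFrameReader.option("dbtable", incrementalQuery).load();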
Further, regarding the interval between jobs: this has not been implemented yet,
as I do not have clarity on it. I want to know how we should do it; it would
require further brainstorming on how to schedule the jobs.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services