This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new f3948d61580 [DOCS] Update spark and flink versions in quickstart (#12489)
f3948d61580 is described below

commit f3948d61580077300e9b7545408a5bb7ec8060d9
Author: Sagar Sumit <[email protected]>
AuthorDate: Fri Dec 13 20:38:25 2024 +0530

    [DOCS] Update spark and flink versions in quickstart (#12489)
---
 website/docs/flink-quick-start-guide.md                         | 8 ++++----
 website/docs/quick-start-guide.md                               | 6 +++---
 website/versioned_docs/version-1.0.0/flink-quick-start-guide.md | 8 ++++----
 website/versioned_docs/version-1.0.0/quick-start-guide.md       | 6 +++---
 4 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/docs/flink-quick-start-guide.md b/website/docs/flink-quick-start-guide.md
index 1cfda067c71..78170535620 100644
--- a/website/docs/flink-quick-start-guide.md
+++ b/website/docs/flink-quick-start-guide.md
@@ -26,7 +26,7 @@ This page introduces Flink-Hudi integration. We can feel the unique charm of how
 
 ### Download Flink and Start Flink cluster
 
-Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, and Flink 1.18.
+Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, Flink 1.18, Flink 1.19, and Flink 1.20.
 You can follow the instructions [here](https://flink.apache.org/downloads) for setting up Flink. Then, start a standalone Flink cluster 
 within a Hadoop environment. For a local setup, you can download the Hadoop binaries and set HADOOP_HOME.
 
@@ -68,7 +68,7 @@ Now start the SQL CLI:
 ```bash
 # For Flink versions: 1.13 - 1.18
 export FLINK_VERSION=1.17 
-export HUDI_VERSION=0.15.0
+export HUDI_VERSION=1.0.0
 wget https://repo1.maven.org/maven2/org/apache/hudi/hudi-flink${FLINK_VERSION}-bundle/${HUDI_VERSION}/hudi-flink${FLINK_VERSION}-bundle-${HUDI_VERSION}.jar -P $FLINK_HOME/lib/
 ./bin/sql-client.sh embedded -j lib/hudi-flink${FLINK_VERSION}-bundle-${HUDI_VERSION}.jar shell
 ```
@@ -79,14 +79,14 @@ The SQL CLI only executes the SQL line by line.
 
 <TabItem value="dataStream">
 
-Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, and Flink 1.18.
+Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, Flink 1.18, Flink 1.19, and Flink 1.20.
 Please add the desired dependency to your project:
 ```xml
 <!-- For Flink versions 1.13 - 1.18 -->
 <properties>
     <flink.version>1.17.0</flink.version>
     <flink.binary.version>1.17</flink.binary.version>
-    <hudi.version>0.15.0</hudi.version>
+    <hudi.version>1.0.0</hudi.version>
 </properties>
 <dependency>
     <groupId>org.apache.hudi</groupId>
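
For reference, with the bumped HUDI_VERSION=1.0.0 the templated download and SQL CLI commands above resolve to concrete coordinates; a minimal sketch, assuming FLINK_VERSION=1.17 as in the snippet (other supported Flink versions substitute the same way):

```bash
# Resolved form of the templated commands above, assuming FLINK_VERSION=1.17
# and the new HUDI_VERSION=1.0.0; other Flink versions expand identically.
wget https://repo1.maven.org/maven2/org/apache/hudi/hudi-flink1.17-bundle/1.0.0/hudi-flink1.17-bundle-1.0.0.jar -P $FLINK_HOME/lib/
./bin/sql-client.sh embedded -j lib/hudi-flink1.17-bundle-1.0.0.jar shell
```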
diff --git a/website/docs/quick-start-guide.md b/website/docs/quick-start-guide.md
index a9315c34e3b..ebb7f2e656f 100644
--- a/website/docs/quick-start-guide.md
+++ b/website/docs/quick-start-guide.md
@@ -58,7 +58,7 @@ From the extracted directory run spark-shell with Hudi:
 ```shell
 # For Spark versions: 3.3 - 3.5
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-spark-shell --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 \
+spark-shell --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 \
 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
 --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
 --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' \
@@ -74,7 +74,7 @@ From the extracted directory run pyspark with Hudi:
 # For Spark versions: 3.3 - 3.5
 export PYSPARK_PYTHON=$(which python3)
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-pyspark --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' --conf 'spark.kryo.registrator=org.apache.spark.HoodieSparkKryoRegistrar'
+pyspark --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' --conf 'spark.kryo.registrator=org.apache.spark.HoodieSparkKryoRegistrar'
 ```
 </TabItem>
 
@@ -86,7 +86,7 @@ From the extracted directory run Spark SQL with Hudi:
 ```shell
 # For Spark versions: 3.3 - 3.5
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-spark-sql --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 \
+spark-sql --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 \
 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
 --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' \
 --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
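
Likewise, the updated Spark commands resolve to the new 1.0.0 bundle once $SPARK_VERSION is expanded; a sketch of the full spark-shell invocation, assuming SPARK_VERSION=3.5 and mirroring the complete conf set shown in the pyspark command above:

```shell
# Expanded spark-shell invocation, assuming SPARK_VERSION=3.5 (or 3.4, 3.3)
# and the new Hudi 1.0.0 bundle; confs mirror the pyspark command above.
spark-shell --packages org.apache.hudi:hudi-spark3.5-bundle_2.12:1.0.0 \
--conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
--conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
--conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' \
--conf 'spark.kryo.registrator=org.apache.spark.HoodieSparkKryoRegistrar'
```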
diff --git a/website/versioned_docs/version-1.0.0/flink-quick-start-guide.md b/website/versioned_docs/version-1.0.0/flink-quick-start-guide.md
index 1cfda067c71..78170535620 100644
--- a/website/versioned_docs/version-1.0.0/flink-quick-start-guide.md
+++ b/website/versioned_docs/version-1.0.0/flink-quick-start-guide.md
@@ -26,7 +26,7 @@ This page introduces Flink-Hudi integration. We can feel the unique charm of how
 
 ### Download Flink and Start Flink cluster
 
-Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, and Flink 1.18.
+Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, Flink 1.18, Flink 1.19, and Flink 1.20.
 You can follow the instructions [here](https://flink.apache.org/downloads) for setting up Flink. Then, start a standalone Flink cluster 
 within a Hadoop environment. For a local setup, you can download the Hadoop binaries and set HADOOP_HOME.
 
@@ -68,7 +68,7 @@ Now start the SQL CLI:
 ```bash
 # For Flink versions: 1.13 - 1.18
 export FLINK_VERSION=1.17 
-export HUDI_VERSION=0.15.0
+export HUDI_VERSION=1.0.0
 wget https://repo1.maven.org/maven2/org/apache/hudi/hudi-flink${FLINK_VERSION}-bundle/${HUDI_VERSION}/hudi-flink${FLINK_VERSION}-bundle-${HUDI_VERSION}.jar -P $FLINK_HOME/lib/
 ./bin/sql-client.sh embedded -j lib/hudi-flink${FLINK_VERSION}-bundle-${HUDI_VERSION}.jar shell
 ```
@@ -79,14 +79,14 @@ The SQL CLI only executes the SQL line by line.
 
 <TabItem value="dataStream">
 
-Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, and Flink 1.18.
+Hudi works with Flink 1.13 (up to Hudi 0.14.x release), Flink 1.14, Flink 1.15, Flink 1.16, Flink 1.17, Flink 1.18, Flink 1.19, and Flink 1.20.
 Please add the desired dependency to your project:
 ```xml
 <!-- For Flink versions 1.13 - 1.18 -->
 <properties>
     <flink.version>1.17.0</flink.version>
     <flink.binary.version>1.17</flink.binary.version>
-    <hudi.version>0.15.0</hudi.version>
+    <hudi.version>1.0.0</hudi.version>
 </properties>
 <dependency>
     <groupId>org.apache.hudi</groupId>
diff --git a/website/versioned_docs/version-1.0.0/quick-start-guide.md b/website/versioned_docs/version-1.0.0/quick-start-guide.md
index a9315c34e3b..ebb7f2e656f 100644
--- a/website/versioned_docs/version-1.0.0/quick-start-guide.md
+++ b/website/versioned_docs/version-1.0.0/quick-start-guide.md
@@ -58,7 +58,7 @@ From the extracted directory run spark-shell with Hudi:
 ```shell
 # For Spark versions: 3.3 - 3.5
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-spark-shell --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 \
+spark-shell --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 \
 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
 --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
 --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' \
@@ -74,7 +74,7 @@ From the extracted directory run pyspark with Hudi:
 # For Spark versions: 3.3 - 3.5
 export PYSPARK_PYTHON=$(which python3)
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-pyspark --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' --conf 'spark.kryo.registrator=org.apache.spark.HoodieSparkKryoRegistrar'
+pyspark --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' --conf 'spark.kryo.registrator=org.apache.spark.HoodieSparkKryoRegistrar'
 ```
 </TabItem>
 
@@ -86,7 +86,7 @@ From the extracted directory run Spark SQL with Hudi:
 ```shell
 # For Spark versions: 3.3 - 3.5
 export SPARK_VERSION=3.5 # or 3.4, 3.3
-spark-sql --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:0.15.0 \
+spark-sql --packages org.apache.hudi:hudi-spark$SPARK_VERSION-bundle_2.12:1.0.0 \
 --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
 --conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension' \
 --conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
