This is an automated email from the ASF dual-hosted git repository.
wenjun pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/seatunnel.git
The following commit(s) were added to refs/heads/dev by this push:
new a14d9c0d08 [Feature][Connector-V2][Oss jindo] Fix the problem of jindo
driver download failure. (#5511)
a14d9c0d08 is described below
commit a14d9c0d08e2abb71e7bab2fd2f621abf76c7b94
Author: Guangdong Liu <[email protected]>
AuthorDate: Tue Sep 19 21:46:59 2023 +0800
[Feature][Connector-V2][Oss jindo] Fix the problem of jindo driver download
failure. (#5511)
---
docs/en/connector-v2/sink/OssJindoFile.md | 5 ++++-
docs/en/connector-v2/source/OssJindoFile.md | 7 +++++--
.../connector-file/connector-file-jindo-oss/pom.xml | 2 ++
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/docs/en/connector-v2/sink/OssJindoFile.md
b/docs/en/connector-v2/sink/OssJindoFile.md
index 1d098da009..b37628f923 100644
--- a/docs/en/connector-v2/sink/OssJindoFile.md
+++ b/docs/en/connector-v2/sink/OssJindoFile.md
@@ -8,6 +8,9 @@ Output data to oss file system using jindo api.
:::tip
+You need to download
[jindosdk-4.6.1.tar.gz](https://jindodata-binary.oss-cn-shanghai.aliyuncs.com/release/4.6.1/jindosdk-4.6.1.tar.gz)
+and then extract it, and copy jindo-sdk-4.6.1.jar and jindo-core-4.6.1.jar
from its lib directory to ${SEATUNNEL_HOME}/lib.
+
If you use spark/flink, In order to use this connector, You must ensure your
spark/flink cluster already integrated hadoop. The tested hadoop version is 2.x.
If you use SeaTunnel Engine, It automatically integrated the hadoop jar when
you download and install SeaTunnel Engine. You can check the jar package under
${SEATUNNEL_HOME}/lib to confirm this.
@@ -237,7 +240,7 @@ For orc file format simple config
```bash
- OssFile {
+ OssJindoFile {
path="/seatunnel/sink"
bucket = "oss://tyrantlucifer-image-bed"
access_key = "xxxxxxxxxxx"
diff --git a/docs/en/connector-v2/source/OssJindoFile.md
b/docs/en/connector-v2/source/OssJindoFile.md
index f77c4a4543..72a6e96278 100644
--- a/docs/en/connector-v2/source/OssJindoFile.md
+++ b/docs/en/connector-v2/source/OssJindoFile.md
@@ -8,6 +8,9 @@ Read data from aliyun oss file system using jindo api.
:::tip
+You need to download
[jindosdk-4.6.1.tar.gz](https://jindodata-binary.oss-cn-shanghai.aliyuncs.com/release/4.6.1/jindosdk-4.6.1.tar.gz)
+and then extract it, and copy jindo-sdk-4.6.1.jar and jindo-core-4.6.1.jar
from its lib directory to ${SEATUNNEL_HOME}/lib.
+
If you use spark/flink, In order to use this connector, You must ensure your
spark/flink cluster already integrated hadoop. The tested hadoop version is 2.x.
If you use SeaTunnel Engine, It automatically integrated the hadoop jar when
you download and install SeaTunnel Engine. You can check the jar package under
${SEATUNNEL_HOME}/lib to confirm this.
@@ -257,7 +260,7 @@ Filter pattern, which used for filtering files.
```hocon
- OssFile {
+OssJindoFile {
path = "/seatunnel/orc"
bucket = "oss://tyrantlucifer-image-bed"
access_key = "xxxxxxxxxxxxxxxxx"
@@ -270,7 +273,7 @@ Filter pattern, which used for filtering files.
```hocon
- OssFile {
+OssJindoFile {
path = "/seatunnel/json"
bucket = "oss://tyrantlucifer-image-bed"
access_key = "xxxxxxxxxxxxxxxxx"
diff --git
a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/pom.xml
b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/pom.xml
index bf2b660bbc..fd4120a075 100644
--- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/pom.xml
+++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/pom.xml
@@ -46,12 +46,14 @@
<groupId>com.aliyun.jindodata</groupId>
<artifactId>jindo-core</artifactId>
<version>${jindo-sdk.version}</version>
+ <scope>provided</scope>
</dependency>
<dependency>
<groupId>com.aliyun.jindodata</groupId>
<artifactId>jindosdk</artifactId>
<version>${jindo-sdk.version}</version>
+ <scope>provided</scope>
</dependency>
<dependency>