This is an automated email from the ASF dual-hosted git repository.

yangzhou pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-streampark.git


The following commit(s) were added to refs/heads/dev by this push:
     new f07c15c11 Chinese Notes to English Notes (#1677)
f07c15c11 is described below

commit f07c15c112fe3589594d1923be4c8d8e8b90c2b9
Author: ChunFu Wu <[email protected]>
AuthorDate: Sun Sep 25 14:33:17 2022 +0800

    Chinese Notes to English Notes (#1677)
---
 .../flink/submit/bean/DeployRequest.scala          |   2 +-
 .../flink/submit/bean/ShutDownRequest.scala        |   3 -
 .../flink/submit/test/YarnPerJobTestCase.scala     |  15 ++-
 .../streampark/plugin/profiling/util/Utils.java    |  10 +-
 .../streampark-spark-cli/create_app.sh             |  18 +--
 .../streampark-spark-cli/create_conf.sh            | 127 +++++++++++----------
 .../apache/streampark/spark/core/sink/EsSink.scala |   4 +-
 .../streampark/spark/core/sink/HBaseSink.scala     |   2 +-
 .../streampark/spark/core/sink/MySQLSink.scala     |   4 +-
 .../streampark/spark/core/sink/RichHBSink.scala    |   4 +-
 streampark-spark/streampark-spark-test/README.md   |   2 +-
 .../src/main/resources/logback.xml                 |  16 +--
 12 files changed, 105 insertions(+), 102 deletions(-)

diff --git 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/DeployRequest.scala
 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/DeployRequest.scala
index 216ccda21..f52a21b2d 100644
--- 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/DeployRequest.scala
+++ 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/DeployRequest.scala
@@ -39,7 +39,7 @@ case class DeployRequest(flinkVersion: FlinkVersion,
                          ) {
   private[submit] lazy val hdfsWorkspace = {
     /**
-      * 必须保持本机flink和hdfs里的flink版本和配置都完全一致.
+      * You must keep the flink version and configuration in the native flink 
and hdfs exactly the same.
       */
     val workspace = Workspace.remote
     val flinkHome = flinkVersion.flinkHome
diff --git 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/ShutDownRequest.scala
 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/ShutDownRequest.scala
index 5bd2aeeb9..4c1464b8b 100644
--- 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/ShutDownRequest.scala
+++ 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-api/src/main/scala/org/apache/streampark/flink/submit/bean/ShutDownRequest.scala
@@ -22,9 +22,6 @@ import java.util.{Map => JavaMap}
 import org.apache.streampark.common.domain.FlinkVersion
 import org.apache.streampark.common.enums.ExecutionMode
 
-/**
-  * Created by xxyykkxx on 2022/4/8.
-  */
 case class ShutDownRequest(flinkVersion: FlinkVersion,
                             executionMode: ExecutionMode,
                             clusterId: String,
diff --git 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-core/src/test/scala/org/apache/streampark/flink/submit/test/YarnPerJobTestCase.scala
 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-core/src/test/scala/org/apache/streampark/flink/submit/test/YarnPerJobTestCase.scala
index b9be7ebec..fdbc99d66 100644
--- 
a/streampark-flink/streampark-flink-submit/streampark-flink-submit-core/src/test/scala/org/apache/streampark/flink/submit/test/YarnPerJobTestCase.scala
+++ 
b/streampark-flink/streampark-flink-submit/streampark-flink-submit-core/src/test/scala/org/apache/streampark/flink/submit/test/YarnPerJobTestCase.scala
@@ -42,12 +42,12 @@ import scala.collection.JavaConversions._
 import scala.collection.JavaConverters._
 
 /**
- * perJob 编程方式提交任务,
+ * Submit jobs programmatically in perJob mode.
  */
 object YarnPerJobTestCase extends Logger {
 
   /**
-   * 必须要在本机安装部署flink,并且配置FLINK_HOME
+   * You must install and deploy flink locally, and configure FLINK_HOME
    */
   lazy val FLINK_HOME = {
     val flinkLocalHome = System.getenv("FLINK_HOME")
@@ -56,16 +56,16 @@ object YarnPerJobTestCase extends Logger {
   }
 
   /**
-   * SocketWindowWordCount.jar 在flink/examples下载自带的示例程序
+   * SocketWindowWordCount.jar, the built-in sample program shipped under flink/examples
    */
   val userJar = s"$FLINK_HOME/examples/streaming/SocketWindowWordCount.jar"
   /**
-   * 运行该程序需要传入的参数
+   * Parameters required to run the program
    */
   val programArgs = "--hostname localhost --port 9999"
 
   /**
-   * 运行指定的option参数
+   * Run the specified option parameter
    */
   val option = "-e yarn-per-job -p 2 -n"
 
@@ -83,9 +83,8 @@ object YarnPerJobTestCase extends Logger {
   }
 
   /**
-   * 瞒天过海,偷天换日,鱼目混珠.
-   * 反射出YarnClusterDescriptor的私有方法deployInternal,
-   * 主要是为了传入applicationName,原始该方法里applicationName是写死的.
+   * The private method deployInternal of YarnClusterDescriptor is reflected, 
mainly to pass in applicationName.
+   * In the original method, applicationName is hard-coded.
    */
   lazy val deployInternalMethod: Method = {
     val paramClass = Array(
diff --git 
a/streampark-plugin/streampark-jvm-profiler/src/main/java/org/apache/streampark/plugin/profiling/util/Utils.java
 
b/streampark-plugin/streampark-jvm-profiler/src/main/java/org/apache/streampark/plugin/profiling/util/Utils.java
index c35bc200e..49271e7d5 100644
--- 
a/streampark-plugin/streampark-jvm-profiler/src/main/java/org/apache/streampark/plugin/profiling/util/Utils.java
+++ 
b/streampark-plugin/streampark-jvm-profiler/src/main/java/org/apache/streampark/plugin/profiling/util/Utils.java
@@ -72,21 +72,21 @@ public class Utils {
     }
 
     public static String zipString(String text) {
-        // 使用指定的压缩级别创建一个新的压缩器。
+        // Creates a new compressor with the specified compression level.
         Deflater deflater = new Deflater(Deflater.BEST_COMPRESSION);
-        // 设置压缩输入数据。
+        // Sets the input data for compression.
         deflater.setInput(text.getBytes());
-        // 当被调用时,表示压缩应该以输入缓冲区的当前内容结束。
+        // When called, indicates that compression should end with the current 
contents of the input buffer.
         deflater.finish();
         final byte[] bytes = new byte[256];
         ByteArrayOutputStream outputStream = new ByteArrayOutputStream(256);
 
         while (!deflater.finished()) {
-            // 压缩输入数据并用压缩数据填充指定的缓冲区。
+            // Compresses the input data and fills the specified buffer with 
the compressed data.
             int length = deflater.deflate(bytes);
             outputStream.write(bytes, 0, length);
         }
-        // 关闭压缩器并丢弃任何未处理的输入。
+        // Closes the compressor and discards any unprocessed input.
         deflater.end();
         return Base64.getEncoder().encodeToString(outputStream.toByteArray());
     }
diff --git a/streampark-spark/streampark-spark-cli/create_app.sh 
b/streampark-spark/streampark-spark-cli/create_app.sh
index bd87460c2..91c5d8426 100644
--- a/streampark-spark/streampark-spark-cli/create_app.sh
+++ b/streampark-spark/streampark-spark-cli/create_app.sh
@@ -81,7 +81,7 @@ create_module() {
     cp -r ${base}/bin ${name}/${module}/assembly/bin
        cp ${name}/default.properties 
${name}/${module}/${conf_dir}/${module}.properties
 
-       echo "# $module 模块说明文档" >${name}/${module}/README.md
+       echo "# $module module documentation" >${name}/${module}/README.md
        cat > ${name}/${module}/assembly.xml <<EOF
 
 <assembly>
@@ -100,7 +100,7 @@ create_module() {
         </dependencySet>
         <dependencySet>
             <!--
-               不使用项目的artifact,第三方jar不要解压
+               Do not use the artifact of the project, do not decompress the 
third-party jar
             -->
             <useProjectArtifact>false</useProjectArtifact>
             <outputDirectory>lib</outputDirectory>
@@ -119,7 +119,7 @@ create_module() {
             <directory>\${project.basedir}/assembly</directory>
             <outputDirectory>/</outputDirectory>
         </fileSet>
-        <!-- 把项目自己编译出来的jar文件,打包进gz文件的lib目录 -->
+        <!-- Package the jar file compiled by the project itself into the lib 
directory of the gz file -->
         <fileSet>
             <directory>\${project.build.directory}</directory>
             <outputDirectory>lib</outputDirectory>
@@ -224,11 +224,11 @@ cat > $name/pom.xml <<EOF
         <profile>
             <!--
             mvn clean package
-            直接打包,只会将 assembly.xml 文件中
+            Direct packaging, only the assembly.xml file will be
             <includes>
                 <include>org.apache.streampark:streampark-spark</include>
             </includes>
-            包含的Jar包打包进去
+            The included Jar package is packaged in
              -->
             <id>default</id>
             <properties>
@@ -241,12 +241,12 @@ cat > $name/pom.xml <<EOF
         <profile>
             <!--
              mvn clean package -Pwithjar -Dmaven.test.skip=true
-            包含依赖jar打包,会将assembly.xml 文件中
+            Including dependent jar packaging, will be included in the 
assembly.xml file
             <includes>
                 <include>org.apache.streampark.spark:spark-core</include>
             </includes>
-            包含的Jar和pom中设置 <scope>\${project.scope}</scope> 的jar一起打包进去,
-            这样蹩脚的设计,主要是因为我不知道怎么能优雅的把 运行、编译都依赖的JarA 抽离出来
+            The included Jar is packaged with the jar set in the pom 
<scope>\${project.scope}</scope>,
+            Such a crappy design is mainly because I don't know how to 
elegantly extract the JarA that depends on running and compiling.
              -->
             <id>withjar</id>
             <properties>
@@ -386,7 +386,7 @@ create_git_filter() {
 }
 
 create_readme() {
-       echo -e "# $name 项目说明" >$name/README.md
+       echo -e "# $name project instruction" >$name/README.md
 }
 
 create_default_conf() {
diff --git a/streampark-spark/streampark-spark-cli/create_conf.sh 
b/streampark-spark/streampark-spark-cli/create_conf.sh
index 95a9ddc15..48a9e5ef3 100644
--- a/streampark-spark/streampark-spark-cli/create_conf.sh
+++ b/streampark-spark/streampark-spark-cli/create_conf.sh
@@ -59,112 +59,119 @@ function _p(){
        echo -e "$(_n $notes)\n${ph}.${k}=${v}\n"
 }
 
-#提交脚本run.sh需要的几个配置
+# Several configurations required to submit the script run.sh
 function user_run_params(){
        local Lprefix="spark.run"
-       _p "必须设置,执行class的全包名称" "main" " "
-       _p "必须设置,包含main class的jar包名称\njar文件必须包含在lib.path当中" "main.jar" " "
-       _p "提供给执行class的命令行参数,多个参数之间用逗号隔开,参数中不能包含空格等空白符\nEx:param1,param2,.." \
+       _p "Must be set to execute the full package name of the class" "main" " 
"
+       _p "Must be set, the name of the jar package containing the main 
class\njar file must be included in lib.path" "main.jar" " "
+       _p "The command line parameters provided to execute the class. Multiple parameters are separated by commas. The parameters cannot contain whitespace characters such as spaces\nEx:param1,param2,.." \
        "self.params" " "
-       _p "用户代码依赖jar包的所在目录\n可以是绝对路径,也可以是相对此配置文件的相对路径,相对路径会自动补全" "lib.path" " "
+       _p "The directory where the user code depends on the jar package\n can 
be an absolute path or a relative path relative to this configuration file, and 
the relative path will be automatically completed" "lib.path" " "
 }
 
-#spark任务提交需要的几个基础配置
+# Several basic configurations required for spark task submission
 function spark_run_params(){
-       _p "执行集群设置,不用设置,一般使用YARN" "master" "yarn"
-       _p "YARN部署模式\ndefault=cluster" "submit.deployMode" "cluster"
-       _p "spark-streaming每个批次间隔时间\ndefault=300" "batch.duration" "300"
-       _p "spark on yarn的任务提交队列\ndefault=defalut" "yarn.queue" "default"
-       _p "spark 任务名称配置,建议保持任务名称全局唯一\n这样可以在设计任务失败的时候根据名称做一些唯一处理\n不设置使用类全名.App" 
\
+       _p "Perform cluster settings, do not need to set, generally use YARN" 
"master" "yarn"
+       _p "YARN deployment mode\ndefault=cluster" "submit.deployMode" "cluster"
+       _p "spark-streaming interval time per batch\ndefault=300" 
"batch.duration" "300"
+       _p "Task submission queue of spark on yarn\ndefault=default" "yarn.queue" "default"
+       _p "Spark task name configuration. It is recommended to keep the task name globally unique\nso that unique handling can be done by name when a task fails\nIf not set, the full class name plus .App is used" \
        "app.name" ""
-       _p 
"spark网络序列化方式,默认是JavaSerializer,可针对所有类型但速度较慢\n这里使用推荐的Kryo方式\nkafka-0.10必须使用此方式" 
\
+       _p "The spark network serialization method, the default is 
JavaSerializer, which can be used for all types but is slower\nThe recommended 
Kryo method is used here\nkafka-0.10 must use this method" \
        "serializer" "org.apache.spark.serializer.KryoSerializer"
 
-       _p "++++++++++++++++++++++Driver节点相关配置+++++++++++++++++++++++++++"
+       _p "++++++++++++++++++++++Driver node related 
configuration+++++++++++++++++++++++++++"
        local Lprefix="spark.driver"
-       _p "Driver节点使用内存大小设置\ndefault=512MB" "memory" "512MB"
-       _p "Driver节点使用的cpu个数设置\ndefault=1" "cores" "1"
-       _p 
"Driver节点构建时spark-jar和user-jar冲突时优先使用用户提供的,这是一个实验性质的参数只对cluster模式有效\ndefault=false"
 \
+       _p "Driver node uses memory size setting\ndefault=512MB" "memory" 
"512MB"
+       _p "The number of CPUs used by the Driver node is set\ndefault=1" 
"cores" "1"
+       _p "When the driver node classpath is built, user-provided jars take precedence over spark jars on conflict. This is an experimental parameter that is only valid in cluster mode\ndefault=false" \
        "userClassPathFirst" "false"
 
-       _p "++++++++++++++++++++++Executor节点相关配置+++++++++++++++++++++++++"
+       _p "++++++++++++++++++++++Executor node related configuration+++++++++++++++++++++++++"
        Lprefix="spark.executor"
-       _p "Executor个数设置\ndefault=1" "instances" "1"
-       _p "Executor使用cpu个数设置\ndefault=1" "cores" "1"
-       _p "Executor使用内存大小设置\ndefault=512MB" "memory" "512MB"
-       _p "同driver节点配置作用相同,但是是针对executor的\ndefault=false" "userClassPathFirst" 
"false"
+       _p "Executor number setting\ndefault=1" "instances" "1"
+       _p "Executor uses the number of cpu settings\ndefault=1" "cores" "1"
+       _p "Executor uses the memory size setting\ndefault=512MB" "memory" 
"512MB"
+       _p "The same as the driver node configuration, but for the 
executor\ndefault=false" "userClassPathFirst" "false"
 }
 
-#spark 任务动态资源分配的配置
+# Configuration of dynamic resource allocation for spark tasks
 function spark_dynamic_params(){
-       _p "++++++++++++++++++++++++Executor动态分配相关配置++++++++++++++++++++"
+       _p "++++++++++++++++++++++++Executor dynamically allocates related 
configuration++++++++++++++++++++"
        local Lprefix="spark.shuffle.service"
-       _p "Executor动态分配的前置服务\ndefault=false" "enabled" "true"
-       _p "服务对应的端口,此端口服务是配置在yarn-site中的,由NodeManager服务加载启动\ndefault=7337" 
"port" "7337"
+       _p "The prerequisite service for dynamic executor allocation\ndefault=false" "enabled" "true"
+       _p "The port corresponding to the service. This port service is 
configured in yarn-site and loaded and started by the NodeManager 
service\ndefault=7337" "port" "7337"
 
        Lprefix="spark.dynamicAllocation"
-       _p "配置是否启用资源动态分配,此动态分配是针对executor的,需要yarn集群配置支持动态分配\ndefault=false" \
+       _p "Configure whether to enable dynamic resource allocation. This 
dynamic allocation is for executors and requires yarn cluster configuration to 
support dynamic allocation\ndefault=false" \
        "enabled" "true"
-       _p "释放空闲的executor的时间\ndefault=60s" "executorIdleTimeout" "60s"
-       _p "有缓存的executor空闲释放时间\ndefault=infinity(默认不释放)" 
"cachedExecutorIdleTimeout" "-1"
-       _p 
"初始化executor的个数,如果设置spark.executor.instances谁小用谁\ndefault=minExecutors(不设置使用此项配置值)"
 \
+       _p "Time to release idle executors\ndefault=60s" "executorIdleTimeout" 
"60s"
+       _p "Idle release time of cached executors\ndefault=infinity (not 
released by default)" "cachedExecutorIdleTimeout" "-1"
+       _p "The initial number of executors. If spark.executor.instances is also set, the smaller of the two is used\ndefault=minExecutors (used when this is not set)" \
        "initialExecutors" "1"
-       _p "executor动态分配可分配最大数量\ndefault=infinity" "maxExecutors" "60"
-       _p "executor动态收缩的最小数量\ndefault=0" "minExecutors" "1"
-       _p "批次调度延迟多长时间开始增加executor\ndefault=1s" "schedulerBacklogTimeout" "1s"
-       _p "同上,但是是针对之后的请求\ndefault=SchedulerBacklogTimeout(不设置使用此项配置值)" \
+       _p "The maximum number of executors that can be allocated dynamically 
by executor\ndefault=infinity" "maxExecutors" "60"
+       _p "Minimum number of executors to shrink dynamically\ndefault=0" 
"minExecutors" "1"
+       _p "How long the batch scheduling delay starts to increase the 
executor\ndefault=1s" "schedulerBacklogTimeout" "1s"
+       _p "Same as above, but for subsequent requests\ndefault=schedulerBacklogTimeout (used when this is not set)" \
        "sustainedSchedulerBacklogTimeout" "1s"
 }
 
-#消费kafka需要的基础配置
+# Basic configuration required to consume kafka
 function streampark_spark_source_kafka(){
        create_notes "\nStreamPark-Spark Kafka Source\nbase config\n"
-       _p "spark.source.kafka.consume后面的配置是标准kafka配置"
+       _p "The configuration behind spark.source.kafka.consume is the standard 
kafka configuration"
        local Lprefix="spark.source.kafka.consume"
-       _p "kafka消费的topics配置,可以配置多个,每个topic之间用逗号[,]隔开\ndefault=" "topics" ""
+       _p "The configuration of topics consumed by kafka can be configured 
multiple times.
+       Each topic is separated by a comma [,]\ndefault=" "topics" ""
        _p "kafka consumer的group id.\ndefault=kafka.consumer.001" "group.id" 
"kafka.consumer.001"
-       _p "kafka集群的主机和端口号,可以配置多个,每个主机之间用逗号[,]隔开\ndefault=" "bootstrap.servers" 
""
-       _p "第一次消费kafka topic的时候指定从什么位置消费
-               有两个可选值latest[最新位置],earliest[最早位置]\ndefault=earliest" 
"auto.offset.reset" "earliest"
-       _p "spark.source.kafka" "spark消费kafka的时候如何管理offset
-               这里可选的值有三种hbase,redis,kafka每种值对应一种存储方式\ndefault=kafka" 
"offset.store.type" "kafka"
-       _p "自定义spark管理kafka 
offset的方法,需要指定一个自定义类的名称\nspark.source.kafka.offset.store.class=none"
-       _p "新版本kafka使用的key序列化方式\ndefault=java.Serialization" \
+       _p "The host and port number of the kafka cluster. Multiple hosts can 
be configured.
+       Each host is separated by a comma [,]\ndefault=" "bootstrap.servers" ""
+       _p "When consuming a kafka topic for the first time, specify where to 
consume from.
+       There are two optional values, latest[latest location],
+       earliest[earliest location]\ndefault=earliest" "auto.offset.reset" 
"earliest"
+       _p "spark.source.kafka" "How to manage offset when Spark consumes kafka.
+       There are three optional values: hbase, redis, and kafka.
+       Each value corresponds to a storage method\ndefault=kafka" "offset.store.type" "kafka"
+       _p "To customize the method of spark management kafka offset,
+       you need to specify the name of a custom 
class\nspark.source.kafka.offset.store.class=none"
+       _p "The key serialization method used by the new version of 
kafka\ndefault=java.Serialization" \
                "key.deserializer" 
"org.apache.kafka.common.serialization.StringDeserializer"
-       _p "最新版kafka使用的value序列化方式\ndefault=java.Serialization" \
+       _p "The value serialization method used by the latest version of 
kafka\ndefault=java.Serialization" \
                "value.deserializer" 
"org.apache.kafka.common.serialization.StringDeserializer"
-       _p "获取一次数据的最大长度,此值的大小需要kafka server端支持\ndefault=10485760" 
"max.partition.fetch.bytes" "10485760"
-       _p "获取一次数据请求的最大等待时间\ndefault=3000" "fetch.max.wait.ms" "3000"
+       _p "Get the maximum length of data at one time,
+       the size of this value needs to be supported by the kafka 
server\ndefault=10485760" "max.partition.fetch.bytes" "10485760"
+       _p "Get the maximum waiting time for a data request\ndefault=3000" 
"fetch.max.wait.ms" "3000"
 }
 
 function streampark_spark_sink_redis(){
        create_notes "\nStreamPark-Spark Redis Sink\nbase config\n"
-       _p "StreamPark-Spark redis sink需要的几个配置"
+       _p "Several configurations required by StreamPark-Spark redis sink"
        local Lprefix="spark.sink.redis"
-       _p "redis主机" "host" ""
-       _p "redis端口" "port" "6379"
-       _p "redis数据库" "db" "0"
-       _p "redis连接超时时间" "timeout" "30"
+       _p "redis host" "host" ""
+       _p "redis port" "port" "6379"
+       _p "redis database" "db" "0"
+       _p "redis connection timeout" "timeout" "30"
 }
 
 function streampark_spark_sink_influx(){
        create_notes "\nStreamPark-Spark InfluxDB Sink\nbase config\n"
-       _p "StreamPark-Spark influxDB sink需要的几个配置"
+       _p "Several configurations required by StreamPark-Spark influxDB sink"
        local Lprefix="spark.sink.influxDB"
-       _p "influxDB主机" "host" ""
-       _p "influxDB端口" "port" "8086"
-       _p "influxDB数据库" "db" ""
+       _p "influxDB host" "host" ""
+       _p "influxDB port" "port" "8086"
+       _p "influxDB database" "db" ""
 }
 
 function streampark_spark_congestion_monitor(){
        create_notes "\nStreamPark-Spark Monitor\nCongestion base config\n"
-       _p "StreamPark-Spark 自带的拥堵监控需要的几个参数"
+       _p "Several parameters required for the congestion monitoring that 
comes with StreamPark-Spark"
        local Lprefix="spark.monitor.congestion"
-       _p "钉钉机器人发送消息的api地址,需要从http开头的全路径" "send.api" ""
-       _p "堆积了几个批次之后开始告警,默认是0不告警\ndefault=0" "batch" "0"
-       _p "钉钉联系人注册钉钉使用的手机号" "ding.to" ""
+       _p "The api address of the message sent by the DingTalk robot requires 
a full path starting with http" "send.api" ""
+       _p "After a few batches are accumulated, the alarm will start. The 
default is 0, no alarm.\ndefault=0" "batch" "0"
+       _p "The mobile phone number used by DingTalk contacts to register with 
DingTalk" "ding.to" ""
        Lprefix="spark.monitor.suicide"
-       _p "堆积多少个批次之后kill掉任务,默认是0不kill,配合任务自动重启功能可有效重启堆积任务使恢复\ndefault=0" 
"batch" "0"
+       _p "How many batches must accumulate before the task is killed. The default is 0 (never kill).
+       Combined with the automatic task restart feature, this can effectively restart a backlogged task to recover\ndefault=0" "batch" "0"
 }
 
 function create_default(){
diff --git 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/EsSink.scala
 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/EsSink.scala
index fa8386d84..1eefb9e8a 100644
--- 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/EsSink.scala
+++ 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/EsSink.scala
@@ -28,7 +28,7 @@
 ///**
 //  *
 //  *
-//  * 输出ES
+//  * output ES
 //  */
 //class EsSink[T](@transient override val sc: SparkContext, initParams: 
Map[String, String] = Map.empty[String, String])
 //  extends Sink[T] {
@@ -39,7 +39,7 @@
 //  lazy val esParam: Map[String, String] = 
(param++initParams).filter(_._1.startsWith(prefix))
 //
 //  /**
-//    * 输出
+//    * output
 //    *
 //    */
 //  def sink(rdd: RDD[T], time: Time = Time(System.currentTimeMillis())): Unit 
= {
diff --git 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/HBaseSink.scala
 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/HBaseSink.scala
index 16f1cee3d..ccef2b5ab 100644
--- 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/HBaseSink.scala
+++ 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/HBaseSink.scala
@@ -55,7 +55,7 @@
 //    connection.getTable(TableName.valueOf(tableName))
 //  }
 //
-//  /** 输出
+//  /** output
 //   *
 //   * @param rdd  RDD[Put]或者RDD[Delete]
 //   * @param time spark.streaming.Time
diff --git 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/MySQLSink.scala
 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/MySQLSink.scala
index 817d291f0..7a1947803 100644
--- 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/MySQLSink.scala
+++ 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/MySQLSink.scala
@@ -56,7 +56,7 @@
 //
 //
 //  /**
-//   * 输出 到 Mysql
+//   * output to Mysql
 //   *
 //   * @param rdd
 //   * @param time
@@ -68,7 +68,7 @@
 //    val begin = System.currentTimeMillis()
 //
 //    val df = rdd.toDF()
-//    //     写入 Mysql
+//    // write to Mysql
 //    df.write.mode(saveMode).jdbc(url, table, prop)
 //    val count = df.count()
 //    val end = System.currentTimeMillis()
diff --git 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/RichHBSink.scala
 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/RichHBSink.scala
index d46d50a9e..243ba0b40 100644
--- 
a/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/RichHBSink.scala
+++ 
b/streampark-spark/streampark-spark-core/src/main/scala/org/apache/streampark/spark/core/sink/RichHBSink.scala
@@ -54,7 +54,7 @@
 //  private def getTable: Table = 
getConnect.getTable(TableName.valueOf(tableName))
 //
 //  /**
-//   * 输出
+//   * output
 //   *
 //   * @param rdd  spark.RDD
 //   * @param time spark.streaming.Time
@@ -76,7 +76,7 @@
 //  }
 //
 //  /**
-//   * 批量插入
+//   * Bulk insert
 //   *
 //   * @param actions
 //   */
diff --git a/streampark-spark/streampark-spark-test/README.md 
b/streampark-spark/streampark-spark-test/README.md
index d6d86f6d2..6f4602373 100644
--- a/streampark-spark/streampark-spark-test/README.md
+++ b/streampark-spark/streampark-spark-test/README.md
@@ -1 +1 @@
-# test 模块说明文档
+# test module documentation
diff --git 
a/streampark-spark/streampark-spark-test/src/main/resources/logback.xml 
b/streampark-spark/streampark-spark-test/src/main/resources/logback.xml
index 7961c96f4..69ddb9c8f 100644
--- a/streampark-spark/streampark-spark-test/src/main/resources/logback.xml
+++ b/streampark-spark/streampark-spark-test/src/main/resources/logback.xml
@@ -18,7 +18,7 @@
 
 -->
 <configuration>
-    <!-- 日志文件存储路径 -->
+    <!-- log file storage path -->
     <property name="LOG_HOME" value="${app.home}/logs/"/>
     <property name="FILE_SIZE" value="50MB"/>
     <property name="MAX_HISTORY" value="100"/>
@@ -29,13 +29,13 @@
     <property name="log.pattern"
               value="%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] 
%-5level %logger{36} - %msg%n"/>
 
-    <!-- 控制台打印 -->
+    <!-- console print -->
     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
         <encoder charset="utf-8">
             <pattern>${log.colorPattern}</pattern>
         </encoder>
     </appender>
-    <!-- ERROR 输入到文件,按日期和文件大小 -->
+    <!-- ERROR output to file, rolled by date and file size -->
     <appender name="ERROR" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
         <encoder charset="utf-8">
             <pattern>${log.pattern}</pattern>
@@ -54,7 +54,7 @@
         </rollingPolicy>
     </appender>
 
-    <!-- WARN 输入到文件,按日期和文件大小 -->
+    <!-- WARN output to file, rolled by date and file size -->
     <appender name="WARN" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
         <encoder charset="utf-8">
             <pattern>${log.pattern}</pattern>
@@ -73,7 +73,7 @@
         </rollingPolicy>
     </appender>
 
-    <!-- INFO 输入到文件,按日期和文件大小 -->
+    <!-- INFO output to file, rolled by date and file size -->
     <appender name="INFO" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
         <encoder charset="utf-8">
             <pattern>${log.pattern}</pattern>
@@ -91,7 +91,7 @@
             </timeBasedFileNamingAndTriggeringPolicy>
         </rollingPolicy>
     </appender>
-    <!-- DEBUG 输入到文件,按日期和文件大小 -->
+    <!-- DEBUG output to file, rolled by date and file size -->
     <appender name="DEBUG" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
         <encoder charset="utf-8">
             <pattern>${log.pattern}</pattern>
@@ -110,7 +110,7 @@
             </timeBasedFileNamingAndTriggeringPolicy>
         </rollingPolicy>
     </appender>
-    <!-- TRACE 输入到文件,按日期和文件大小 -->
+    <!-- TRACE output to file, rolled by date and file size -->
     <appender name="TRACE" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
         <encoder charset="utf-8">
             <pattern>${log.pattern}</pattern>
@@ -130,7 +130,7 @@
         </rollingPolicy>
     </appender>
 
-    <!-- Logger 根目录 -->
+    <!-- Logger root directory -->
     <root level="INFO">
         <appender-ref ref="STDOUT"/>
         <appender-ref ref="DEBUG"/>

Reply via email to