This is an automated email from the ASF dual-hosted git repository.
github-bot pushed a commit to branch asf-site
in repository
https://gitbox.apache.org/repos/asf/incubator-dolphinscheduler-website.git
The following commit(s) were added to refs/heads/asf-site by this push:
new ac27568 Automated deployment: Thu Jul 30 08:38:44 UTC 2020
3c226e6d050aa3dd28e26479c4fbe15b01131c2d
ac27568 is described below
commit ac27568d642dcaf91ea04289d67f71ed267f2ec4
Author: dailidong <[email protected]>
AuthorDate: Thu Jul 30 08:38:44 2020 +0000
Automated deployment: Thu Jul 30 08:38:44 UTC 2020
3c226e6d050aa3dd28e26479c4fbe15b01131c2d
---
build/blog.js | 2 +-
build/blogDetail.js | 2 +-
build/community.js | 2 +-
build/documentation.js | 2 +-
build/home.js | 2 +-
en-us/docs/release/faq.html | 212 +++++++++++++++++++++++++++-
en-us/docs/release/faq.json | 2 +-
zh-cn/docs/release/faq.html | 332 ++++++++++++++++++++++++++++++++++----------
zh-cn/docs/release/faq.json | 2 +-
9 files changed, 474 insertions(+), 84 deletions(-)
diff --git a/build/blog.js b/build/blog.js
index 92beda7..92b7767 100644
--- a/build/blog.js
+++ b/build/blog.js
@@ -138,7 +138,7 @@ var
t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:
//! moment.js locale configuration
function t(e,t){var n=e.split("_");return
t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function
n(e,n,r){var
o={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+"
"+t(o[r],+e)}function r(e,t){var n,r={nominative:"неділя_ [...]
//! moment.js locale configuration
-var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
+var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
* react-is.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/build/blogDetail.js b/build/blogDetail.js
index 505fcc1..4a94c6e 100644
--- a/build/blogDetail.js
+++ b/build/blogDetail.js
@@ -138,7 +138,7 @@ var
t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:
//! moment.js locale configuration
function t(e,t){var n=e.split("_");return
t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function
n(e,n,r){var
o={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+"
"+t(o[r],+e)}function r(e,t){var n,r={nominative:"неділя_ [...]
//! moment.js locale configuration
-var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
+var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
* react-is.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/build/community.js b/build/community.js
index 6ad82d9..c5e227b 100644
--- a/build/community.js
+++ b/build/community.js
@@ -138,7 +138,7 @@ var
t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:
//! moment.js locale configuration
function t(e,t){var n=e.split("_");return
t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function
n(e,n,r){var
o={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+"
"+t(o[r],+e)}function r(e,t){var n,r={nominative:"неділя_ [...]
//! moment.js locale configuration
-var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
+var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
* react-is.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/build/documentation.js b/build/documentation.js
index f0f547a..50ea71a 100644
--- a/build/documentation.js
+++ b/build/documentation.js
@@ -138,7 +138,7 @@ var
t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:
//! moment.js locale configuration
function t(e,t){var n=e.split("_");return
t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function
n(e,n,r){var
o={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+"
"+t(o[r],+e)}function r(e,t){var n,r={nominative:"неділя_ [...]
//! moment.js locale configuration
-var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
+var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
* react-is.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/build/home.js b/build/home.js
index 147210c..ce8dc93 100644
--- a/build/home.js
+++ b/build/home.js
@@ -138,7 +138,7 @@ var
t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:
//! moment.js locale configuration
function t(e,t){var n=e.split("_");return
t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function
n(e,n,r){var
o={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+"
"+t(o[r],+e)}function r(e,t){var n,r={nominative:"неділя_ [...]
//! moment.js locale configuration
-var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
+var
t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر"],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return
e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D
MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY
HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return
e<12?"صبح":"شام"},calendar [...]
* react-is.production.min.js
*
* Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/en-us/docs/release/faq.html b/en-us/docs/release/faq.html
index df5e0e3..05f5171 100644
--- a/en-us/docs/release/faq.html
+++ b/en-us/docs/release/faq.html
@@ -12,7 +12,10 @@
<link rel="stylesheet" href="/build/documentation.css" />
</head>
<body>
- <div id="root"><div class="documentation-page"
data-reactroot=""><header class="header-container header-container-normal"><div
class="header-body"><a href="/en-us/index.html"><img class="logo"
src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span
class="icon-search"></span></div><span class="language-switch
language-switch-normal">中</span><div class="header-menu"><img
class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul
class="ant-menu blackClass ant [...]
+ <div id="root"><div class="documentation-page"
data-reactroot=""><header class="header-container header-container-normal"><div
class="header-body"><a href="/en-us/index.html"><img class="logo"
src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span
class="icon-search"></span></div><span class="language-switch
language-switch-normal">中</span><div class="header-menu"><img
class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul
class="ant-menu blackClass ant [...]
+<p>A: Before version 1.2 project name is called EasyScheduler, version 1.2 and
later it's called DolphinScheduler.</p>
+<hr>
+<h2>Q: DolphinScheduler service introduction and recommended running
memory</h2>
<p>A: DolphinScheduler consists of 5 services, MasterServer, WorkerServer,
ApiServer, AlertServer, LoggerServer and UI.</p>
<table>
<thead>
@@ -53,16 +56,18 @@
<p>A: Please refer to 'System parameter' in the system-manual</p>
<hr>
<h2>Q: pip install kazoo This installation gives an error. Is it necessary to
install?</h2>
-<p>A: This is the python connection Zookeeper needs to use, must be
installed</p>
+<p>A: This is the python connection Zookeeper needs to use, it is used to
delete the master/worker temporary node info in the Zookeeper. so you can
ignore error if it's your first install. after version 1.3.0, kazoo is not been
needed, we use program to replace what kazoo done</p>
<hr>
<h2>Q: How to specify the machine running task</h2>
-<p>A: Use <strong>the administrator</strong> to create a Worker group,
<strong>specify the Worker group</strong> when the <strong>process definition
starts</strong>, or <strong>specify the Worker group on the task node</strong>.
If not specified, use Default, <strong>Default is to select one of all the
workers in the cluster to use for task submission and execution.</strong></p>
+<p>A: version 1.2 and berfore, Use <strong>the administrator</strong> to
create a Worker group, <strong>specify the Worker group</strong> when the
<strong>process definition starts</strong>, or <strong>specify the Worker group
on the task node</strong>. If not specified, use Default, <strong>Default is to
select one of all the workers in the cluster to use for task submission and
execution.</strong>
+version 1.3, you can set worker group for the worker</p>
<hr>
<h2>Q: Priority of the task</h2>
<p>A: We also support <strong>the priority of processes and tasks</strong>.
Priority We have five levels of <strong>HIGHEST, HIGH, MEDIUM, LOW and
LOWEST</strong>. <strong>You can set the priority between different process
instances, or you can set the priority of different task instances in the same
process instance.</strong> For details, please refer to the task priority
design in the architecture-design.</p>
<hr>
<h2>Q: dolphinscheduler-grpc gives an error</h2>
-<p>A: Execute in the root directory: mvn -U clean package assembly:assembly
-Dmaven.test.skip=true , then refresh the entire project</p>
+<p>A: Execute in the root directory: mvn -U clean package assembly:assembly
-Dmaven.test.skip=true , then refresh the entire project.
+version 1.3 not use grpc, we use netty directly</p>
<hr>
<h2>Q: Does DolphinScheduler support running on windows?</h2>
<p>A: In theory, <strong>only the Worker needs to run on Linux</strong>. Other
services can run normally on Windows. But it is still recommended to deploy on
Linux.</p>
@@ -76,7 +81,7 @@
<p> 3, if the above configuration is correct, then please check if the
Api Server service is normal,</p>
<p> curl <a
href="http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info">http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info</a>,
check the Api Server log,</p>
<p> if Prompt
cn.dolphinscheduler.api.interceptor.LoginHandlerInterceptor:[76] - session info
is null, which proves that the Api Server service is normal.</p>
-<p> 4, if there is no problem above, you need to check if
<strong>server.context-path and server.port configuration</strong> in
<strong>application.properties</strong> is correct</p>
+<p> 4, if there is no problem above, you need to check if
<strong>server.context-path and server.port configuration</strong> in
<strong>application.properties</strong> is correct</p>
<hr>
<h2>Q: After the process definition is manually started or scheduled, no
process instance is generated.</h2>
<p>A: 1, first <strong>check whether the MasterServer service exists through
jps</strong>, or directly check whether there is a master service in zk from
the service monitoring.</p>
@@ -124,6 +129,7 @@
<hr>
<h2>Q: Can the tenant created before the HDFS startup use the resource center
normally?</h2>
<p>A: No. Because the tenant created by HDFS is not started, the tenant
directory will not be registered in HDFS. So the last resource will report an
error.</p>
+<hr>
<h2>Q: In the multi-master and multi-worker state, the service is lost, how to
be fault-tolerant</h2>
<p>A: <strong>Note:</strong> <strong>Master monitors Master and Worker
services.</strong></p>
<p> 1,If the Master service is lost, other Masters will take over the
process of the hanged Master and continue to monitor the Worker task status.</p>
@@ -135,17 +141,21 @@
<hr>
<h2>Q : Timing is easy to set to execute every second</h2>
<p>A : Note when setting the timing. If the first digit (* * * * * ? *) is set
to *, it means execution every second. <strong>We will add a list of recently
scheduled times in version 1.1.0.</strong> You can see the last 5 running times
online at <a href="http://cron.qqe2.com/">http://cron.qqe2.com/</a></p>
+<hr>
<h2>Q: Is there a valid time range for timing?</h2>
<p>A: Yes, <strong>if the timing start and end time is the same time, then
this timing will be invalid timing. If the end time of the start and end time
is smaller than the current time, it is very likely that the timing will be
automatically deleted.</strong></p>
+<hr>
<h2>Q : There are several implementations of task dependencies</h2>
<p>A: 1, the task dependency between <strong>DAG</strong>, is <strong>from
the zero degree</strong> of the DAG segmentation</p>
<p> 2, there are <strong>task dependent nodes</strong>, you can
achieve cross-process tasks or process dependencies, please refer to the
(DEPENDENT) node design in the system-manual.</p>
<p> Note: <strong>Cross-project processes or task dependencies are not
supported</strong></p>
+<hr>
<h2>Q: There are several ways to start the process definition.</h2>
<p>A: 1, in <strong>the process definition list</strong>, click the
<strong>Start</strong> button.</p>
<p> 2, <strong>the process definition list adds a timer</strong>,
scheduling start process definition.</p>
<p> 3, process definition <strong>view or edit</strong> the DAG page,
any <strong>task node right click</strong> Start process definition.</p>
<p> 4, you can define DAG editing for the process, set the running flag
of some tasks to <strong>prohibit running</strong>, when the process definition
is started, the connection of the node will be removed from the DAG.</p>
+<hr>
<h2>Q : Python task setting Python version</h2>
<p>A: 1,<strong>for the version after 1.0.3</strong> only need to modify
PYTHON_HOME in conf/env/.dolphinscheduler_env.sh</p>
<pre><code>export PYTHON_HOME=/bin/python
@@ -154,8 +164,10 @@
<pre><code>export
PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
</code></pre>
<p> 2,For versions prior to 1.0.3, the Python task only supports
the Python version of the system. It does not support specifying the Python
version.</p>
+<hr>
<h2>Q:Worker Task will generate a child process through sudo -u tenant sh
xxx.command, will kill when kill</h2>
<p>A: We will add the kill task in 1.0.4 and kill all the various child
processes generated by the task.</p>
+<hr>
<h2>Q : How to use the queue in DolphinScheduler, what does the user queue and
tenant queue mean?</h2>
<p>A : The queue in the DolphinScheduler can be configured on the user or the
tenant. <strong>The priority of the queue specified by the user is higher than
the priority of the tenant queue.</strong> For example, to specify a queue for
an MR task, the queue is specified by mapreduce.job.queuename.</p>
<p>Note: When using the above method to specify the queue, the MR uses the
following methods:</p>
@@ -164,11 +176,13 @@
String[] remainingArgs = optionParser.getRemainingArgs();
</code></pre>
<p>If it is a Spark task --queue mode specifies the queue</p>
+<hr>
<h2>Q : Master or Worker reports the following alarm</h2>
<p align="center">
<img
src="https://analysys.github.io/easyscheduler_docs/images/master_worker_lack_res.png"
width="60%" />
</p>
<p>A : Change the value of master.properties
<strong>master.reserved.memory</strong> under conf to a smaller value, say 0.1
or the value of worker.properties <strong>worker.reserved.memory</strong> is a
smaller value, say 0.1</p>
+<hr>
<h2>Q: The hive version is 1.1.0+cdh5.15.0, and the SQL hive task connection
is reported incorrectly.</h2>
<p align="center">
<img
src="https://analysys.github.io/easyscheduler_docs/images/cdh_hive_error.png"
width="60%" />
@@ -187,7 +201,193 @@
<version>1.1.0</version>
</dependency>
</code></pre>
-<p>we will collect more FAQ later</p>
+<hr>
+<h2>Q : how to add a worker server</h2>
+<p>A: 1, Create deployment user and hosts mapping, please refer 1.3 part of <a
href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/cluster-deployment.html">cluster
deployment</a></p>
+<p> 2, Configure hosts mapping and ssh access and modify directory
permissions. please refer 1.4 part of <a
href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/cluster-deployment.html">cluster
deployment</a></p>
+<p> 3, Copy the deployment directory from worker server that has
already deployed</p>
+<p> 4, Go to bin dir, then start worker server and logger server</p>
+<pre><code> ```
+ ./dolphinscheduler-daemon.sh start worker-server
+ ./dolphinscheduler-daemon.sh start logger-server
+ ```
+</code></pre>
+<hr>
+<h2>Q : When DolphinScheduler release a new version, and the change between
current version and latest, and how to upgrade, and version number
specification</h2>
+<p>A: 1, The release process of Apache Project happens in the mailing list.
You can subscribe DolphinScheduler's mailing list and then when the release is
in process, you'll receive release emails. Please follow this <a
href="https://github.com/apache/incubator-dolphinscheduler#get-help">introduction</a>
to subscribe DolphinScheduler's mailing list.</p>
+<p>2, When new version published, there would be release note which describe
the change log, and there also have upgrade document for the previous version
to new's.</p>
+<p>3, Version number is x.y.z, when x is increased, it represents the version
of the new architecture. When y is increased, it means that it is incompatible
with the y version before it needs to be upgraded by script or other manual
processing. When the z increase represents a bug fix, the upgrade is fully
compatible. No additional processing is required. Remaining problem, the 1.0.2
upgrade is not compatible with 1.0.1 and requires an upgrade script.</p>
+<hr>
+<h2>Q : Subsequent tasks can execute even front task failed</h2>
+<p>A: When start the workflow, you can set the task failure strategy: contine
or failure.
+<img
src="https://user-images.githubusercontent.com/15833811/80368215-ee378080-88be-11ea-9074-01a33d012b23.png"
alt="set task failure strategy"></p>
+<hr>
+<h2>Q : Workflow template DAG, workflow instance, work task and what is the
relationship among them? A DAG supports a maximum concurrency of 100, does it
mean that 100 workflow instances are generated and run concurrently? A task
node in a DAG also has a concurrent number configuration. Does it mean that
tasks can run concurrently with multiple threads? Is the maximum number
100?</h2>
+<p>A:</p>
+<p>1.2.1 version</p>
+<pre><code>master.properties
+Control the max parallel number of master node workflows
+master.exec.threads=100
+
+Control the max number of parallel tasks in each workflow
+master.exec.task.number=20
+
+worker.properties
+Control the max parallel number of worker node tasks
+worker.exec.threads=100
+</code></pre>
+<hr>
+<h2>Q : Worker group manage page no buttons displayed</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/39816903/81903776-d8cb9180-95f4-11ea-98cb-94ca1e6a1db5.png"
width="60%" />
+</p>
+A: For version 1.3.0, we want to support k8s, while the ip always will be
changed, so can't config on the UI, worker can config group name in the
worker.properties.
+<hr>
+<h2>Q : Why not add mysql jdbc connector to docker image</h2>
+<p>A: The license of mysql jdbc connector is not compitable with apache v2
license, so it can't be included by docker image.</p>
+<hr>
+<h2>Q : Allways fail when a task instance submit multiple yarn application</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/16174111/81312485-476e9380-90b9-11ea-9aad-ed009db899b1.png"
width="60%" />
+ </p>
+A: This bug have fix in dev and in Requirement/TODO list.
+<hr>
+<h2>Q : Master server and worker server stop abnormally after run for a few
days</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/18378986/81293969-c3101680-90a0-11ea-87e5-ac9f0dd53f5e.png"
width="60%" />
+ </p>
+A: Session timeout is too short, only 0.3 seconds. Change the config item in
zookeeper.properties:
+<pre><code> zookeeper.session.timeout=60000
+ zookeeper.connection.timeout=30000
+</code></pre>
+<hr>
+<h2>Q : Started using the docker-compose default configuration and display
zookeeper errors</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/42579056/80374318-13c98780-88c9-11ea-8d5f-53448b957f02.png"
width="60%" />
+ </p>
+A: This problem is solved in dev-1.3.0. This
[pr](https://github.com/apache/incubator-dolphinscheduler/pull/2595) has solved
this bug, brief change log:
+<pre><code> 1. add zookeeper environment variable
ZOO_4LW_COMMANDS_WHITELIST in docker-compose.yml file.
+ 2. change the data type of minLatency, avgLatency and maxLatency from int
to float.
+</code></pre>
+<hr>
+<h2>Q : Interface show some task would be running all the time when db delayed
and log show task instance is null</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/51871547/80302626-b1478d00-87dd-11ea-97d4-08aa2244a6d0.jpg"
width="60%" />
+ </p>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/51871547/80302626-b1478d00-87dd-11ea-97d4-08aa2244a6d0.jpg"
width="60%" />
+ </p>
+<p>A: This <a
href="https://github.com/apache/incubator-dolphinscheduler/issues/1477">bug</a>
describe the problem detail and it has been been solved in version 1.2.1.</p>
+<p>For version under 1.2.1, some tips for this situation:</p>
+<pre><code>1. clear the task queue in zk for path: /dolphinscheduler/task_queue
+2. change the state of the task to failed( integer value: 6).
+3. run the work flow by recover from failed
+</code></pre>
+<hr>
+<h2>Q : Zookeeper masters znode list ip address is 127.0.0.1, instead of
wanted ip eth0 or eth1, and may can't see task log</h2>
+<p>A: bug fix:</p>
+<pre><code> 1, confirm hostname
+ $hostname
+ hadoop1
+ 2, hostname -i
+ 127.0.0.1 10.3.57.15
+ 3, edit /etc/hosts,delete hadoop1 from 127.0.0.1 record
+ $cat /etc/hosts
+ 127.0.0.1 localhost
+ 10.3.57.15 ds1 hadoop1
+ 4, hostname -i
+ 10.3.57.15
+</code></pre>
+<p>Hostname cmd return server hostname, hostname -i return all matched ips
configured in /etc/hosts. So after I delete the hostname matched with
127.0.0.1, and only remain internal ip resolution, instead of remove all the
127.0.0.1 resolution record. As long as hostname cmd return the correct
internal ip configured in /etc/hosts can fix this bug. DolphinScheduler use the
first record returned by hostname -i command. In my opion, DS should not use
hostname -i to get the ip , as in many co [...]
+<hr>
+<h2>Q : The scheduling system set a second frequency task, causing the system
to crash</h2>
+<p>A: The scheduling system not support second frequency task.</p>
+<hr>
+<h2>Q : Compile front-end code(dolphinscheduler-ui) show error cannot download
"<a
href="https://github.com/sass/node-sass/releases/download/v4.13.1/darwin-x64-72_binding.node">https://github.com/sass/node-sass/releases/download/v4.13.1/darwin-x64-72_binding.node</a>"</h2>
+<p>A: 1, cd dolphinscheduler-ui and delete node_modules directory</p>
+<pre><code>sudo rm -rf node_modules
+</code></pre>
+<p> 2, install node-sass through <a
href="http://npm.taobao.org">npm.taobao.org</a></p>
+<pre><code>sudo npm uninstall node-sass
+sudo npm i node-sass
--sass_binary_site=https://npm.taobao.org/mirrors/node-sass/
+</code></pre>
+<p>3, if the 2nd step failure, please, <a
href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/frontend-development.html">referer
url</a></p>
+<pre><code> sudo npm rebuild node-sass
+</code></pre>
+<p>When solved this problem, if you don't want to download this node every
time, you can set system environment variable: SASS_BINARY_PATH=
/xxx/xxx/xxx/xxx.node.</p>
+<hr>
+<h2>Q : How to config when use mysql as database instead of postgres</h2>
+<p>A: 1, Edit project root dir maven config file, remove scope test property
so that mysql driver can be loaded.</p>
+<pre><code><dependency>
+ <groupId>mysql</groupId>
+ <artifactId>mysql-connector-java</artifactId>
+ <version>${mysql.connector.version}</version>
+ <scope>test<scope>
+</dependency>
+</code></pre>
+<p>	2, Edit application-dao.properties and quartz.properties config file to
use mysql driver.
+Default is the PostgreSQL driver because of license problem.</p>
+<hr>
+<h2>Q : How does a shell task run</h2>
+<p>A: 1, Where is the executed server? Specify one worker to run the task, you
can create a worker group in Security Center, then the task can be sent to the
particular worker. If a worker group has multiple servers, which server
actually executes is determined by scheduling and has randomness.</p>
+<p> 2, If it is a shell file of a path on the server, how to point to the
path? The server shell file, involving permissions issues, it is not
recommended to do so. It is recommended that you use the storage function of
the resource center, and then use the resource reference in the shell editor.
The system will help you download the script to the execution directory. If the
task dependent on resource center files, worker use "hdfs dfs -get"
to get the resource files in HDFS, [...]
+<p>3, Which user execute the task? Task is run by the tenant through
"sudo -u ${tenant}", tenant is a linux user.</p>
+<hr>
+<h2>Q : What’s the best deploy mode you suggest in production env</h2>
+<p>A: 1, I suggest you use 3 nodes for stability if you don't have too many
tasks to run. And deploy Master/Worker server on different nodes is better. If
you only have one node, you of course only can deploy them together! By the
way, how many machines you need is determined by your business. The
DolphinScheduler system itself does not use too many resources. Test more, and
you'll find the right way to use a few machines.</p>
+<hr>
+<h2>Q : DEPENDENT Task Node</h2>
+<p>A: 1, DEPENDENT task node actually does not have a script; it is used to
configure data cycle dependency logic, and then a task node is added after it
to realize cross-cycle task dependency.</p>
+<hr>
+<h2>Q : How to change the boot port of the master</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/8263441/62352160-0f3e9100-b53a-11e9-95ba-3ae3dde49c72.png"
width="60%" />
+ </p>
+A: 1, modify application_master.properties, for example: server.port=12345.
+<hr>
+<h2>Q : Scheduled tasks cannot be online</h2>
+<p>A: 1, We can successfully create a scheduled task and add one record into
t_scheduler_schedules table, but when I click online, front page no reaction
and will lock table t_scheduler_schedules, and tested set field release_state
value to 1 in table t_scheduler_schedules, and task display online state. For
DS version above 1.2 table name is t_ds_schedules, other version table name is
t_scheduler_schedules.</p>
+<hr>
+<h2>Q : What is the address of swagger ui</h2>
+<p>A: 1, For version 1.2+ is <a
href="http://apiServerIp">http://apiServerIp</a>:apiServerPort/dolphinscheduler/doc.html
others is <a
href="http://apiServerIp">http://apiServerIp</a>:apiServerPort/escheduler/doc.html.</p>
+<hr>
+<h2>Q : Front-end installation package is missing files</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/41460919/61437083-d960b080-a96e-11e9-87f1-297ba3aca5e3.png"
width="60%" />
+ </p>
+ <p align="center">
+ <img
src="https://user-images.githubusercontent.com/41460919/61437218-1b89f200-a96f-11e9-8e48-3fac47eb2389.png"
width="60%" />
+ </p>
+<p>A: 1, User changed the api server config file item
+<img
src="https://user-images.githubusercontent.com/41460919/61678323-1b09a680-ad35-11e9-9707-3ba68bbc70d6.png"
alt="apiServerContextPath">, thus lead to the problem. After resume to the
default value and problem solved.</p>
+<hr>
+<h2>Q : Upload a relatively large file blocked</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/21357069/58231400-805b0e80-7d69-11e9-8107-7f37b06a95df.png"
width="60%" />
+ </p>
+A: 1, Edit nginx config file, edit upload max size client_max_body_size 1024m.
+<p>	2, if the version of Google Chrome is old, update the browser to the
latest version.</p>
+<hr>
+<h2>Q : Create a spark data source, click "Test Connection", the
system will fall back to the login page</h2>
+<p>A: 1, edit nginx config file /etc/nginx/conf.d/escheduler.conf</p>
+<pre><code> proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+</code></pre>
+<hr>
+<h2>Q : Welcome to subscribe the DolphinScheduler development mailing list</h2>
+<p>A: In the process of using DolphinScheduler, if you have any questions or
ideas, suggestions, you can participate in the DolphinScheduler community
building through the Apache mailing list. Sending a subscription email is also
very simple, the steps are as follows:</p>
+<p>1, Send an email to <a
href="mailto:[email protected]">[email protected]</a>
with your own email address, subject and content.</p>
+<p>2, Receive confirmation email and reply. After completing step 1, you will
receive a confirmation email from <a
href="mailto:[email protected]">[email protected]</a>
(if not received, please confirm whether the email is automatically classified
as spam, promotion email, subscription email, etc.) . Then reply directly to
the email, or click on the link in the email to reply quickly, the subject and
content are arbitrary.</p>
+<p>3, Receive a welcome email. After completing the above steps, you will
receive a welcome email with the subject WELCOME to <a
href="mailto:[email protected]">[email protected]</a>,
and you have successfully subscribed to the Apache DolphinScheduler
(Incubating) mailing list.</p>
+<hr>
+<h2>Q : Workflow Dependency</h2>
+<p>A: 1, It is currently judged according to natural days, at the end of last
month: the judgment time is the workflow A start_time/scheduler_time between
'2019-05-31 00:00:00' and '2019-05-31 23:59:59'. Last month: It is judged that
there is an A instance completed every day from the 1st to the end of the
month. Last week: There are completed A instances 7 days last week. The first
two days: Judging yesterday and the day before yesterday, there must be a
completed A instance for two days.</p>
+<hr>
+<h2>Q : DS Backend Interface Document</h2>
+<p>A: 1, <a
href="http://106.75.43.194:8888/dolphinscheduler/doc.html?language=en">http://106.75.43.194:8888/dolphinscheduler/doc.html?language=en</a>.</p>
+<hr>
+<h2>We will collect more FAQ later.</h2>
</div></section><footer class="footer-container"><div class="footer-body"><img
src="/img/ds_gray.svg"/><div class="cols-container"><div class="col
col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort
undergoing incubation at The Apache Software Foundation (ASF), sponsored by
Incubator.
Incubation is required of all newly accepted projects until a further review
indicates
that the infrastructure, communications, and decision making process have
stabilized in a manner consistent with other successful ASF projects.
diff --git a/en-us/docs/release/faq.json b/en-us/docs/release/faq.json
index adb9255..0b37201 100644
--- a/en-us/docs/release/faq.json
+++ b/en-us/docs/release/faq.json
@@ -1,6 +1,6 @@
{
"filename": "faq.md",
- "__html": "<h2>Q: DolphinScheduler service introduction and recommended
running memory</h2>\n<p>A: DolphinScheduler consists of 5 services,
MasterServer, WorkerServer, ApiServer, AlertServer, LoggerServer and
UI.</p>\n<table>\n<thead>\n<tr>\n<th>Service</th>\n<th>Description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>MasterServer</td>\n<td>Mainly
responsible for DAG segmentation and task status
monitoring</td>\n</tr>\n<tr>\n<td>WorkerServer/LoggerServer</td>\n<td>Mainly
responsible for [...]
+ "__html": "<h2>Q: What's the name of this project?</h2>\n<p>A: Before
version 1.2 project name is called EasyScheduler, version 1.2 and later it's
called DolphinScheduler.</p>\n<hr>\n<h2>Q: DolphinScheduler service
introduction and recommended running memory</h2>\n<p>A: DolphinScheduler
consists of 5 services, MasterServer, WorkerServer, ApiServer, AlertServer,
LoggerServer and
UI.</p>\n<table>\n<thead>\n<tr>\n<th>Service</th>\n<th>Description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>
[...]
"link": "/en-us/docs/release/faq.html",
"meta": {}
}
\ No newline at end of file
diff --git a/zh-cn/docs/release/faq.html b/zh-cn/docs/release/faq.html
index 02188e4..06fadf2 100644
--- a/zh-cn/docs/release/faq.html
+++ b/zh-cn/docs/release/faq.html
@@ -12,8 +12,11 @@
<link rel="stylesheet" href="/build/documentation.css" />
</head>
<body>
- <div id="root"><div class="documentation-page"
data-reactroot=""><header class="header-container header-container-normal"><div
class="header-body"><a href="/zh-cn/index.html"><img class="logo"
src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span
class="icon-search"></span></div><span class="language-switch
language-switch-normal">En</span><div class="header-menu"><img
class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul
class="ant-menu blackClass an [...]
-<p>A:
DolphinScheduler由5个服务组成,MasterServer、WorkerServer、ApiServer、AlertServer、LoggerServer和UI。</p>
+ <div id="root"><div class="documentation-page"
data-reactroot=""><header class="header-container header-container-normal"><div
class="header-body"><a href="/zh-cn/index.html"><img class="logo"
src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span
class="icon-search"></span></div><span class="language-switch
language-switch-normal">En</span><div class="header-menu"><img
class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul
class="ant-menu blackClass an [...]
+<p>A:1.2 版本以前项目名称是 EasyScheduler,1.2 以及之后的版本叫做 DolphinScheduler。</p>
+<hr>
+<h2>Q:DolphinScheduler 服务介绍及建议运行内存</h2>
+<p>A:DolphinScheduler 由 5
个服务组成,MasterServer、WorkerServer、ApiServer、AlertServer、LoggerServer 和 UI。</p>
<table>
<thead>
<tr>
@@ -28,11 +31,11 @@
</tr>
<tr>
<td>WorkerServer/LoggerServer</td>
-<td>主要负责任务的提交、执行和任务状态的更新。LoggerServer用于Rest Api通过 <strong>RPC</strong>
查看日志</td>
+<td>主要负责任务的提交、执行和任务状态的更新。LoggerServer 用于 Rest Api 通过 <strong>RPC</strong>
查看日志</td>
</tr>
<tr>
<td>ApiServer</td>
-<td>提供Rest Api服务,供UI进行调用</td>
+<td>提供 Rest Api 服务,供 UI 进行调用</td>
</tr>
<tr>
<td>AlertServer</td>
@@ -44,60 +47,60 @@
</tr>
</tbody>
</table>
-<p>注意:<strong>由于服务比较多,建议单机部署最好是4核16G以上</strong></p>
+<p>注意:<strong>由于服务比较多,建议单机部署最好是 4 核 16G 以上</strong></p>
<hr>
<h2>Q:系统支持哪些邮箱?</h2>
-<p>A:支持绝大多数邮箱,qq、163、126、139、outlook、aliyun等皆支持。支持<strong>TLS和SSL</strong>协议,可以在alert.properties中选择性配置</p>
+<p>A:支持绝大多数邮箱,qq、163、126、139、outlook、aliyun 等皆支持。支持 <strong>TLS 和 SSL</strong>
协议,可以在 alert.properties 中选择性配置</p>
<hr>
<h2>Q:常用的系统变量时间参数有哪些,如何使用?</h2>
-<p>A:请参考 使用手册</p>
+<p>A:请参考使用手册</p>
<hr>
<h2>Q:pip install kazoo 这个安装报错。是必须安装的吗?</h2>
-<p>A: 这个是python连接Zookeeper需要使用到的,必须要安装</p>
+<p>A: 这个是 python 连接 Zookeeper 需要使用到的,必须要安装</p>
<hr>
-<h2>Q: 怎么指定机器运行任务</h2>
-<p>A:使用 <strong>管理员</strong> 创建Worker分组,在 <strong>流程定义启动</strong>
的时候可<strong>指定Worker分组</strong>或者在<strong>任务节点上指定Worker分组</strong>。如果不指定,则使用Default,<strong>Default默认是使用的集群里所有的Worker中随机选取一台来进行任务提交、执行</strong></p>
+<h2>Q:怎么指定机器运行任务</h2>
+<p>A:使用 <strong>管理员</strong> 创建 Worker 分组,在 <strong>流程定义启动</strong>
的时候可<strong>指定Worker分组</strong>或者在<strong>任务节点上指定Worker分组</strong>。如果不指定,则使用
Default,<strong>Default默认是使用的集群里所有的Worker中随机选取一台来进行任务提交、执行</strong></p>
<hr>
<h2>Q:任务的优先级</h2>
-<p>A:我们同时 <strong>支持流程和任务的优先级</strong>。优先级我们有
<strong>HIGHEST、HIGH、MEDIUM、LOW和LOWEST</strong>
五种级别。<strong>可以设置不同流程实例之间的优先级,也可以设置同一个流程实例中不同任务实例的优先级</strong>。详细内容请参考任务优先级设计
<a
href="https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1">https://analysys.github.io/easyscheduler_docs_cn/系统架构设计.html#系统架构设计</a></p>
+<p>A:我们同时 <strong>支持流程和任务的优先级</strong>。优先级我们有 <strong>HIGHEST、HIGH、MEDIUM、LOW
和 LOWEST</strong>
五种级别。<strong>可以设置不同流程实例之间的优先级,也可以设置同一个流程实例中不同任务实例的优先级</strong>。详细内容请参考任务优先级设计
<a
href="https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1">https://analysys.github.io/easyscheduler_docs_cn/系统架构设计.html#系统架构设计</a></p>
<hr>
-<h2>Q:dolphinscheduler-grpc报错</h2>
+<h2>Q:dolphinscheduler-grpc 报错</h2>
<p>A:在根目录下执行:mvn -U clean package assembly:assembly -Dmaven.test.skip=true ,
然后刷新下整个项目</p>
<hr>
-<h2>Q:DolphinScheduler支持windows上运行么</h2>
-<p>A:
理论上只有<strong>Worker是需要在Linux上运行的</strong>,其它的服务都是可以在windows上正常运行的。但是还是建议最好能在linux上部署使用</p>
+<h2>Q:DolphinScheduler 支持 windows 上运行么</h2>
+<p>A: 理论上只有 <strong>Worker 是需要在 Linux 上运行的</strong>,其它的服务都是可以在 windows
上正常运行的。但是还是建议最好能在 linux 上部署使用</p>
<hr>
-<h2>Q:UI 在 linux 编译node-sass提示:Error:EACCESS:permission denied,mkdir xxxx</h2>
+<h2>Q:UI 在 linux 编译 node-sass 提示:Error:EACCESS:permission denied,mkdir
xxxx</h2>
<p>A:单独安装 <strong>npm install node-sass --unsafe-perm</strong>,之后再 <strong>npm
install</strong></p>
<hr>
<h2>Q:UI 不能正常登陆访问</h2>
-<p>A: 1,如果是node启动的查看dolphinscheduler-ui下的.env API_BASE配置是否是Api Server服务地址</p>
-<p> 2,如果是nginx启动的并且是通过<strong><a
href="http://install-dolphinscheduler-ui.sh">install-dolphinscheduler-ui.sh</a></strong>安装的,查看</p>
-<p>
<strong>/etc/nginx/conf.d/dolphinscheduler.conf</strong>中的proxy_pass配置是否是Api
Server服务地址</p>
-<p> 3,如果以上配置都是正确的,那么请查看Api Server服务是否是正常的,</p>
-<p> curl <a
href="http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info">http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info</a>
查看Api Server日志,</p>
-<p>
如果提示cn.dolphinscheduler.api.interceptor.LoginHandlerInterceptor:[76] - session
info is null,则证 明Api Server服务是正常的</p>
-<p> 4,如果以上都没有问题,需要查看一下<strong>application.properties</strong> 中的
<strong>server.context-path 和
server.port 配置</strong>是否正确</p>
-<hr>
-<h2>Q: 流程定义手动启动或调度启动之后,没有流程实例生成</h2>
-<p>A: 1,首先通过<strong>jps
查看MasterServer服务是否存在</strong>,或者从服务监控直接查看zk中是否存在master服务</p>
-<p> 2,如果存在master服务,查看 <strong>命令状态统计</strong> 或者
<strong>t_ds_error_command</strong> 中是否增加的新记录,如果增加了,<strong>请查看 message
字段定位启动异常原因</strong></p>
-<hr>
-<h2>Q : 任务状态一直处于提交成功状态</h2>
-<p>A: 1,首先通过<strong>jps
查看WorkerServer服务是否存在</strong>,或者从服务监控直接查看zk中是否存在worker服务</p>
-<p> 2,如果 <strong>WorkerServer</strong> 服务正常,需要
<strong>查看MasterServer是否把task任务放到zk队列中</strong>
,<strong>需要查看MasterServer日志及zk队列中是否有任务阻塞</strong></p>
-<p> 3,如果以上都没有问题,需要定位是否指定了Worker分组,但是
<strong>Worker分组的机器不是在线状态</strong></p>
-<hr>
-<h2>Q : <a href="http://install.sh">install.sh</a> 中需要注意问题</h2>
+<p>A: 1,如果是 node 启动的查看 dolphinscheduler-ui 下的 .env API_BASE 配置是否是 Api Server
服务地址</p>
+<p> 2,如果是 nginx 启动的并且是通过 <strong><a
href="http://install-dolphinscheduler-ui.sh">install-dolphinscheduler-ui.sh</a></strong>
安装的,查看</p>
+<p> <strong>/etc/nginx/conf.d/dolphinscheduler.conf</strong> 中的
proxy_pass 配置是否是 Api Server 服务地址</p>
+<p> 3,如果以上配置都是正确的,那么请查看 Api Server 服务是否是正常的,</p>
+<p> curl <a
href="http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info">http://192.168.xx.xx:12345/dolphinscheduler/users/get-user-info</a>
查看 Api Server 日志,</p>
+<p> 如果提示
cn.dolphinscheduler.api.interceptor.LoginHandlerInterceptor:[76] - session info
is null,则证明 Api Server 服务是正常的</p>
+<p> 4,如果以上都没有问题,需要查看一下 <strong>application.properties</strong> 中的
<strong>server.context-path 和 server.port 配置</strong>是否正确</p>
+<hr>
+<h2>Q:流程定义手动启动或调度启动之后,没有流程实例生成</h2>
+<p>A: 1,首先通过 <strong>jps 查看MasterServer服务是否存在</strong>,或者从服务监控直接查看 zk 中是否存在
master 服务</p>
+<p> 2,如果存在 master 服务,查看 <strong>命令状态统计</strong> 或者
<strong>t_ds_error_command</strong> 中是否增加的新记录,如果增加了,<strong>请查看 message
字段定位启动异常原因</strong></p>
+<hr>
+<h2>Q:任务状态一直处于提交成功状态</h2>
+<p>A: 1,首先通过 <strong>jps 查看 WorkerServer 服务是否存在</strong>,或者从服务监控直接查看 zk 中是否存在
worker 服务</p>
+<p> 2,如果 <strong>WorkerServer</strong> 服务正常,需要 <strong>查看 MasterServer
是否把 task 任务放到 zk 队列中</strong> ,<strong>需要查看 MasterServer 日志及 zk
队列中是否有任务阻塞</strong></p>
+<p> 3,如果以上都没有问题,需要定位是否指定了 Worker 分组,但是 <strong>Worker
分组的机器不是在线状态</strong></p>
+<hr>
+<h2>Q:<a href="http://install.sh">install.sh</a> 中需要注意问题</h2>
<p>A: 1,如果替换变量中包含特殊字符,<strong>请用 \ 转移符进行转移</strong></p>
-<p>
2,installPath="/data1_1T/dolphinscheduler",<strong>这个目录不能和当前要一键安装的install.sh目录是一样的</strong></p>
-<p>
3,deployUser="dolphinscheduler",<strong>部署用户必须具有sudo权限</strong>,因为worker是通过sudo
-u 租户 sh xxx.command进行执行的</p>
-<p>
4,monitorServerState="false",服务监控脚本是否启动,默认是不启动服务监控脚本的。<strong>如果启动服务监控脚本,则每5分钟定时来监控master和worker的服务是否down机,如果down机则会自动重启</strong></p>
-<p>
5,hdfsStartupSate="false",是否开启HDFS资源上传功能。默认是不开启的,<strong>如果不开启则资源中心是不能使用的</strong>。如果开启,需要conf/common/hadoop/hadoop.properties中配置fs.defaultFS和yarn的相关配置,如果使用namenode
HA,需要将core-site.xml和hdfs-site.xml复制到conf根目录下</p>
-<p> 注意:<strong>1.0.x版本是不会自动创建hdfs根目录的,需要自行创建,并且需要部署用户有hdfs的操作权限</strong></p>
-<hr>
-<h2>Q : 流程定义和流程实例下线异常</h2>
-<p>A : 对于 <strong>1.0.4 以前的版本中</strong>,修改dolphinscheduler-api
cn.dolphinscheduler.api.quartz包下的代码即可</p>
+<p>
2,installPath="/data1_1T/dolphinscheduler",<strong>这个目录不能和当前要一键安装的 <a
href="http://install.sh">install.sh</a> 目录是一样的</strong></p>
+<p> 3,deployUser="dolphinscheduler",<strong>部署用户必须具有 sudo
权限</strong>,因为 worker 是通过 sudo -u 租户 sh xxx.command 进行执行的</p>
+<p>
4,monitorServerState="false",服务监控脚本是否启动,默认是不启动服务监控脚本的。<strong>如果启动服务监控脚本,则每
5 分钟定时来监控 master 和 worker 的服务是否 down 机,如果 down 机则会自动重启</strong></p>
+<p> 5,hdfsStartupSate="false",是否开启 HDFS
资源上传功能。默认是不开启的,<strong>如果不开启则资源中心是不能使用的</strong>。如果开启,需要
conf/common/hadoop/hadoop.properties 中配置 fs.defaultFS 和 yarn 的相关配置,如果使用
namenode HA,需要将 core-site.xml 和 hdfs-site.xml 复制到conf根目录下</p>
+<p> 注意:<strong>1.0.x 版本是不会自动创建 hdfs
根目录的,需要自行创建,并且需要部署用户有hdfs的操作权限</strong></p>
+<hr>
+<h2>Q:流程定义和流程实例下线异常</h2>
+<p>A : 对于 <strong>1.0.4 以前的版本中</strong>,修改 dolphinscheduler-api
cn.dolphinscheduler.api.quartz 包下的代码即可</p>
<pre><code>public boolean deleteJob(String jobName, String jobGroupName) {
lock.writeLock().lock();
try {
@@ -118,59 +121,59 @@
}
</code></pre>
<hr>
-<h2>Q : HDFS启动之前创建的租户,能正常使用资源中心吗</h2>
-<p>A: 不能。因为在未启动HDFS创建的租户,不会在HDFS中注册租户目录。所以上次资源会报错</p>
-<h2>Q : 多Master和多Worker状态下,服务掉了,怎么容错</h2>
-<p>A: <strong>注意:Master监控Master及Worker服务。</strong></p>
-<p> 1,如果Master服务掉了,其它的Master会接管挂掉的Master的流程,继续监控Worker task状态</p>
-<p> 2,如果Worker服务掉了,Master会监控到Worker服务掉了,如果存在Yarn任务,Kill Yarn任务之后走重试</p>
+<h2>Q:HDFS 启动之前创建的租户,能正常使用资源中心吗</h2>
+<p>A: 不能。因为在未启动 HDFS 创建的租户,不会在 HDFS 中注册租户目录。所以上传资源会报错</p>
+<h2>Q:多 Master 和多 Worker 状态下,服务掉了,怎么容错</h2>
+<p>A: <strong>注意:Master 监控 Master 及 Worker 服务。</strong></p>
+<p> 1,如果 Master 服务掉了,其它的 Master 会接管挂掉的 Master 的流程,继续监控 Worker task 状态</p>
+<p> 2,如果 Worker 服务掉了,Master 会监控到 Worker 服务掉了,如果存在 Yarn 任务,Kill Yarn
任务之后走重试</p>
<p>具体请看容错设计:<a
href="https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1">https://analysys.github.io/easyscheduler_docs_cn/系统架构设计.html#系统架构设计</a></p>
<hr>
-<h2>Q : 对于Master和Worker一台机器伪分布式下的容错</h2>
-<p>A : 1.0.3
版本只实现了Master启动流程容错,不走Worker容错。也就是说如果Worker挂掉的时候,没有Master存在。这流程将会出现问题。我们会在
<strong>1.1.0</strong> 版本中增加Master和Worker启动自容错,修复这个问题。如果想手动修改这个问题,需要针对
<strong>跨重启正在运行流程</strong>
<strong>并且已经掉的正在运行的Worker任务,需要修改为失败</strong>,<strong>同时跨重启正在运行流程设置为失败状态</strong>。然后从失败节点进行流程恢复即可</p>
+<h2>Q:对于 Master 和 Worker 一台机器伪分布式下的容错</h2>
+<p>A : 1.0.3 版本只实现了 Master 启动流程容错,不走 Worker 容错。也就是说如果 Worker 挂掉的时候,没有 Master
存在。这流程将会出现问题。我们会在 <strong>1.1.0</strong> 版本中增加 Master 和 Worker
启动自容错,修复这个问题。如果想手动修改这个问题,需要针对 <strong>跨重启正在运行流程</strong> <strong>并且已经掉的正在运行的
Worker
任务,需要修改为失败</strong>,<strong>同时跨重启正在运行流程设置为失败状态</strong>。然后从失败节点进行流程恢复即可</p>
<hr>
-<h2>Q : 定时容易设置成每秒执行</h2>
-<p>A : 设置定时的时候需要注意,如果第一位(* * * * * ? *)设置成 *
,则表示每秒执行。<strong>我们将会在1.1.0版本中加入显示最近调度的时间列表</strong> ,使用http://cron.qqe2.com/
可以在线看近5次运行时间</p>
-<h2>Q: 定时有有效时间范围吗</h2>
+<h2>Q:定时容易设置成每秒执行</h2>
+<p>A : 设置定时的时候需要注意,如果第一位(* * * * * ? *)设置成 * ,则表示每秒执行。<strong>我们将会在 1.1.0
版本中加入显示最近调度的时间列表</strong> ,使用 <a
href="http://cron.qqe2.com/">http://cron.qqe2.com/</a> 可以在线看近 5 次运行时间</p>
+<h2>Q:定时有有效时间范围吗</h2>
<p>A:有的,<strong>如果定时的起止时间是同一个时间,那么此定时将是无效的定时</strong>。<strong>如果起止时间的结束时间比当前的时间小,很有可能定时会被自动删除</strong></p>
-<h2>Q : 任务依赖有几种实现</h2>
-<p>A: 1,<strong>DAG</strong> 之间的任务依赖关系,是从 <strong>入度为零</strong> 进行DAG切分的</p>
+<h2>Q:任务依赖有几种实现</h2>
+<p>A: 1,<strong>DAG</strong> 之间的任务依赖关系,是从 <strong>入度为零</strong> 进行 DAG 切分的</p>
<p> 2,有 <strong>任务依赖节点</strong> ,可以实现跨流程的任务或者流程依赖,具体请参考
依赖(DEPENDENT)节点:<a
href="https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C.html#%E4%BB%BB%E5%8A%A1%E8%8A%82%E7%82%B9%E7%B1%BB%E5%9E%8B%E5%92%8C%E5%8F%82%E6%95%B0%E8%AE%BE%E7%BD%AE">https://analysys.github.io/easyscheduler_docs_cn/系统使用手册.html#任务节点类型和参数设置</a></p>
<p> 注意:<strong>不支持跨项目的流程或任务依赖</strong></p>
-<h2>Q: 流程定义有几种启动方式</h2>
+<h2>Q:流程定义有几种启动方式</h2>
<p>A: 1,在 <strong>流程定义列表</strong>,点击 <strong>启动</strong> 按钮</p>
<p> 2,<strong>流程定义列表添加定时器</strong>,调度启动流程定义</p>
<p> 3,流程定义 <strong>查看或编辑</strong> DAG 页面,任意 <strong>任务节点右击</strong>
启动流程定义</p>
-<p> 4,可以对流程定义 DAG 编辑,设置某些任务的运行标志位
<strong>禁止运行</strong>,则在启动流程定义的时候,将该节点的连线将从DAG中去掉</p>
-<h2>Q : Python任务设置Python版本</h2>
-<p>A: 1,对于<strong>1.0.3之后的版本</strong>只需要修改
conf/env/.dolphinscheduler_env.sh中的PYTHON_HOME</p>
+<p> 4,可以对流程定义 DAG 编辑,设置某些任务的运行标志位
<strong>禁止运行</strong>,则在启动流程定义的时候,将该节点的连线将从 DAG 中去掉</p>
+<h2>Q:Python 任务设置 Python 版本</h2>
+<p>A: 1,对于 <strong>1.0.3 之后的版本</strong>只需要修改
conf/env/.dolphinscheduler_env.sh 中的 PYTHON_HOME</p>
<pre><code>export PYTHON_HOME=/bin/python
</code></pre>
-<p>注意:这了 <strong>PYTHON_HOME</strong> ,是python命令的绝对路径,而不是单纯的
PYTHON_HOME,还需要注意的是 export PATH 的时候,需要直接</p>
+<p>注意:这里 <strong>PYTHON_HOME</strong> ,是 python 命令的绝对路径,而不是单纯的
PYTHON_HOME,还需要注意的是 export PATH 的时候,需要直接</p>
<pre><code>export
PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
</code></pre>
-<p> 2,对 1.0.3 之前的版本,Python任务只能支持系统的Python版本,不支持指定Python版本</p>
-<h2>Q: Worker Task 通过sudo -u 租户 sh xxx.command会产生子进程,在kill的时候,是否会杀掉</h2>
-<p>A: 我们会在1.0.4中增加kill任务同时,kill掉任务产生的各种所有子进程</p>
-<h2>Q : DolphinScheduler中的队列怎么用,用户队列和租户队列是什么意思</h2>
-<p>A : DolphinScheduler
中的队列可以在用户或者租户上指定队列,<strong>用户指定的队列优先级是高于租户队列的优先级的。</strong>,例如:对MR任务指定队列,是通过
mapreduce.job.queuename 来指定队列的。</p>
-<p>注意:MR在用以上方法指定队列的时候,传递参数请使用如下方式:</p>
+<p> 2,对 1.0.3 之前的版本,Python 任务只能支持系统的 Python 版本,不支持指定 Python 版本</p>
+<h2>Q:Worker Task 通过 sudo -u 租户 sh xxx.command 会产生子进程,在 kill 的时候,是否会杀掉</h2>
+<p>A: 我们会在 1.0.4 中增加 kill 任务同时,kill 掉任务产生的各种所有子进程</p>
+<h2>Q:DolphinScheduler 中的队列怎么用,用户队列和租户队列是什么意思</h2>
+<p>A : DolphinScheduler
中的队列可以在用户或者租户上指定队列,<strong>用户指定的队列优先级是高于租户队列的优先级的。</strong>,例如:对 MR 任务指定队列,是通过
mapreduce.job.queuename 来指定队列的。</p>
+<p>注意:MR 在用以上方法指定队列的时候,传递参数请使用如下方式:</p>
<pre><code> Configuration conf = new
Configuration();
GenericOptionsParser optionParser = new GenericOptionsParser(conf,
args);
String[] remainingArgs = optionParser.getRemainingArgs();
</code></pre>
-<p>如果是Spark任务 --queue 方式指定队列</p>
-<h2>Q : Master 或者 Worker报如下告警</h2>
+<p>如果是 Spark 任务 --queue 方式指定队列</p>
+<h2>Q:Master 或者 Worker 报如下告警</h2>
<p align="center">
<img
src="https://analysys.github.io/easyscheduler_docs_cn/images/master_worker_lack_res.png"
width="60%" />
</p>
-<p>A : 修改conf下的 master.properties <strong>master.reserved.memory</strong>
的值为更小的值,比如说0.1 或者</p>
-<p>worker.properties <strong>worker.reserved.memory</strong> 的值为更小的值,比如说0.1</p>
-<h2>Q : hive版本是1.1.0+cdh5.15.0,SQL hive任务连接报错</h2>
+<p>A : 修改 conf 下的 master.properties <strong>master.reserved.memory</strong>
的值为更小的值,比如说 0.1 或者</p>
+<p>worker.properties <strong>worker.reserved.memory</strong> 的值为更小的值,比如说
0.1</p>
+<h2>Q:hive 版本是 1.1.0+cdh5.15.0,SQL hive 任务连接报错</h2>
<p align="center">
<img
src="https://analysys.github.io/easyscheduler_docs_cn/images/cdh_hive_error.png"
width="60%" />
</p>
-<p>A : 将 hive pom</p>
+<p>A: 将 hive pom</p>
<pre><code><dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
@@ -184,6 +187,193 @@
<version>1.1.0</version>
</dependency>
</code></pre>
+<hr>
+<h2>Q:如何增加一台工作服务器</h2>
+<p>A: 1,参考官网<a
href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/cluster-deployment.html">部署文档</a>
1.3 小节,创建部署用户和 hosts 映射</p>
+<p> 2,参考官网<a
href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/cluster-deployment.html">部署文档</a>
1.4 小节,配置 hosts 映射和 ssh 打通及修改目录权限.
+1.4 小节的最后一步是在当前新增机器上执行的,即需要给部署目录部署用户的权限</p>
+<p> 3,复制正在运行的服务器上的部署目录到新机器的同样的部署目录下</p>
+<p> 4,到 bin 下,启动 worker server 和 logger server</p>
+<pre><code> ./dolphinscheduler-daemon.sh start worker-server
+ ./dolphinscheduler-daemon.sh start logger-server
+</code></pre>
+<hr>
+<h2>Q:DolphinScheduler 什么时候发布新版本,同时新旧版本区别,以及如何升级,版本号规范</h2>
+<p>A:1,Apache 项目的发版流程是通过邮件列表完成的。 你可以订阅 DolphinScheduler
的邮件列表,订阅之后如果有发版,你就可以收到邮件。请参照这篇<a
href="https://github.com/apache/incubator-dolphinscheduler#get-help">指引</a>来订阅
DolphinScheduler 的邮件列表。</p>
+<p>2,当项目发版的时候,会有发版说明告知具体的变更内容,同时也会有从旧版本升级到新版本的升级文档。</p>
+<p>3,版本号为 x.y.z, 当 x 增加时代表全新架构的版本。当 y 增加时代表与 y 版本之前的不兼容需要升级脚本或其他人工处理才能升级。当 z
增加代表是 bug 修复,升级完全兼容。无需额外处理。之前有个问题 1.0.2 的升级不兼容 1.0.1 需要升级脚本。</p>
+<hr>
+<h2>Q:后续任务在前置任务失败情况下仍旧可以执行</h2>
+<p>A:在启动工作流的时候,你可以设置失败策略:继续还是失败。
+<img
src="https://user-images.githubusercontent.com/15833811/80368215-ee378080-88be-11ea-9074-01a33d012b23.png"
alt="设置任务失败策略"></p>
+<hr>
+<h2>Q:工作流模板 DAG、工作流实例、工作任务及实例之间是什么关系 工作流模板 DAG、工作流实例、工作任务及实例之间是什么关系,一个 dag
支持最大并发 100,是指产生 100 个工作流实例并发运行吗?一个 dag 中的任务节点,也有并发数的配置,是指任务也可以并发多个线程运行吗?最大数 100
吗?</h2>
+<p>A:</p>
+<p>1.2.1 version</p>
+<pre><code> master.properties
+ 设置 master 节点并发执行的最大工作流数
+ master.exec.threads=100
+
+ Control the number of parallel tasks in each workflow
+ 设置每个工作流可以并发执行的最大任务数
+ master.exec.task.number=20
+
+ worker.properties
+        设置 worker 节点并发执行的最大任务数
+ worker.exec.threads=100
+</code></pre>
+<hr>
+<h2>Q:工作组管理页面没有展示按钮</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/39816903/81903776-d8cb9180-95f4-11ea-98cb-94ca1e6a1db5.png"
width="60%" />
+</p>
A:1.3.0 版本,为了支持 k8s,worker ip 一直变动,因此我们不能在 UI 界面上配置,工作组名称可以在
worker.properties 中配置。
+<hr>
+<h2>Q:为什么不把 mysql 的 jdbc 连接包添加到 docker 镜像里面</h2>
+<p>A:Mysql jdbc 连接包的许可证和 apache v2 的许可证不兼容,因此它不能被加入到 docker 镜像里面。</p>
+<hr>
+<h2>Q:当一个任务提交多个 yarn 程序的时候经常失败</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/16174111/81312485-476e9380-90b9-11ea-9aad-ed009db899b1.png"
width="60%" />
+</p>
+A:这个 Bug 在 dev 分支已修复,并加入到需求/待做列表。
+<hr>
+<h2>Q:Master 服务和 Worker 服务在运行几天之后停止了</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/18378986/81293969-c3101680-90a0-11ea-87e5-ac9f0dd53f5e.png"
width="60%" />
+</p>
+A:会话超时时间太短了,只有 0.3 秒,修改 zookeeper.properties 的配置项:
+<pre><code> zookeeper.session.timeout=60000
+ zookeeper.connection.timeout=30000
+</code></pre>
+<hr>
+<h2>Q:使用 docker-compose 默认配置启动,显示 zookeeper 错误</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/42579056/80374318-13c98780-88c9-11ea-8d5f-53448b957f02.png"
width="60%" />
+ </p>
A:这个问题在 dev-1.3.0 版本解决了。这个 <a
href="https://github.com/apache/incubator-dolphinscheduler/pull/2595">pr</a>
已经解决了这个 bug,主要的改动点:
+<pre><code>
在docker-compose.yml文件中增加zookeeper的环境变量ZOO_4LW_COMMANDS_WHITELIST。
+ 把minLatency,avgLatency and maxLatency的类型从int改成float。
+</code></pre>
+<hr>
+<h2>Q:界面上显示任务一直运行,结束不了,从日志上看任务实例为空</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/51871547/80302626-b1478d00-87dd-11ea-97d4-08aa2244a6d0.jpg"
width="60%" />
+ </p>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/51871547/80302626-b1478d00-87dd-11ea-97d4-08aa2244a6d0.jpg"
width="60%" />
+ </p>
A:这个 <a
href="https://github.com/apache/incubator-dolphinscheduler/issues/1477">bug</a>
描述了问题的详情。这个问题在 1.2.1 版本已经被修复了。
+对于 1.2.1 以下的版本,这种情况的一些提示:
+<pre><code>1,清空 zk 下这个路径的任务:/dolphinscheduler/task_queue
+2,修改任务状态为失败(int 值 6)
+3,运行工作流来从失败中恢复
+</code></pre>
+<hr>
+<h2>Q:zk 中注册的 master 信息 ip 地址是 127.0.0.1,而不是配置的域名所对应或者解析的 ip
地址,可能导致不能查看任务日志</h2>
+<p>A:修复 bug:</p>
+<pre><code> 1、confirm hostname
+ $hostname
+ hadoop1
+ 2、hostname -i
+ 127.0.0.1 10.3.57.15
+ 3、edit /etc/hosts,delete hadoop1 from 127.0.0.1 record
+ $cat /etc/hosts
+ 127.0.0.1 localhost
+ 10.3.57.15 ds1 hadoop1
+ 4、hostname -i
+ 10.3.57.15
+</code></pre>
+<p>hostname 命令返回服务器主机名,hostname -i 返回的是服务器主机名在 /etc/hosts 中所有匹配的ip地址。所以我把
/etc/hosts 中 127.0.0.1 中的主机名删掉,只保留内网 ip 的解析就可以了,没必要把 127.0.0.1 整条注释掉, 只要
hostname 命令返回值在 /etc/hosts 中对应的内网 ip 正确就可以,ds 程序取了第一个值,我理解上 ds 程序不应该用 hostname
-i 取值这样有点问题,因为好多公司服务器的主机名都是运维配置的,感觉还是直接取配置文件的域名解析的返回 ip 更准确,或者 znode 中存域名信息而不是
/etc/hosts。</p>
+<hr>
+<h2>Q:调度系统设置了一个秒级的任务,导致系统挂掉</h2>
+<p>A:调度系统不支持秒级任务。</p>
+<hr>
+<h2>Q:编译前后端代码 (dolphinscheduler-ui) 报错不能下载"<a
href="https://github.com/sass/node-sass/releases/download/v4.13.1/darwin-x64-72_binding.node">https://github.com/sass/node-sass/releases/download/v4.13.1/darwin-x64-72_binding.node</a>"</h2>
+<p>A:1,cd dolphinscheduler-ui 然后删除 node_modules 目录</p>
+<pre><code>sudo rm -rf node_modules
+</code></pre>
+<p> 2,通过 <a href="http://npm.taobao.org">npm.taobao.org</a> 下载 node-sass</p>
+<pre><code>sudo npm uninstall node-sass
+sudo npm i node-sass
--sass_binary_site=https://npm.taobao.org/mirrors/node-sass/
+</code></pre>
+<p>3,如果步骤 2 报错,请重新构建 node-sass <a
href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/frontend-development.html">参考链接</a></p>
+<pre><code> sudo npm rebuild node-sass
+</code></pre>
+<p>当问题解决之后,如果你不想每次编译都下载这个 node,你可以设置系统环境变量:SASS_BINARY_PATH=
/xxx/xxx/xxx/xxx.node。</p>
+<hr>
+<h2>Q:当使用 mysql 作为 ds 数据库需要如何配置</h2>
+<p>A:1,修改项目根目录 maven 配置文件,移除 scope 的 test 属性,这样 mysql 的包就可以在其它阶段被加载</p>
+<pre><code><dependency>
+ <groupId>mysql</groupId>
+ <artifactId>mysql-connector-java</artifactId>
+ <version>${mysql.connector.version}</version>
+	<scope>test</scope>
+</dependency>
+</code></pre>
+<p>	2,修改 application-dao.properties 和 quartz.properties 来使用 mysql 驱动
+默认驱动是 postgres 主要由于许可证原因。</p>
+<hr>
+<h2>Q:shell 任务是如何运行的</h2>
+<p>A:1,被执行的服务器在哪里配置,以及实际执行的服务器是哪台? 要指定在某个 worker 上去执行,可以在 worker 分组中配置,固定
IP,这样就可以把路径写死。如果配置的 worker 分组有多个 worker,实际执行的服务器由调度决定的,具有随机性。</p>
+<p> 2,如果是服务器上某个路径的一个 shell 文件,怎么指向这个路径?服务器上某个路径下的 shell
文件,涉及到权限问题,不建议这么做。建议你可以使用资源中心的存储功能,然后在 shell
编辑器里面使用资源引用就可以,系统会帮助你把脚本下载到执行目录下。如果以 hdfs 作为资源中心,在执行的时候,调度器会把依赖的 jar 包,文件等资源拉到
worker 的执行目录上,我这边是 /tmp/escheduler/exec/process,该配置可以在 <a
href="http://install.sh">install.sh</a> 中进行指定。</p>
+<p>3,以哪个用户来执行任务?执行任务的时候,调度器会采用 sudo -u 租户的方式去执行,租户是一个 linux 用户。</p>
+<hr>
+<h2>Q:生产环境部署方式有推荐的最佳实践吗</h2>
+<p>A:1,如果没有很多任务要运行,出于稳定性考虑我们建议使用 3 个节点,并且最好把 Master/Worker
服务部署在不同的节点。如果你只有一个节点,当然只能把所有的服务部署在同一个节点!通常来说,需要多少节点取决于你的业务,海豚调度系统本身不需要很多的资源。充分测试之后,你们将找到使用较少节点的合适的部署方式。</p>
+<hr>
+<h2>Q:DEPENDENT 节点</h2>
+<p>A:1,DEPENDENT 节点实际是没有执行体的,是专门用来配置数据周期依赖逻辑,然后再把执行节点挂载后面,来实现任务间的周期依赖。</p>
+<hr>
+<h2>Q:如何改变 Master 服务的启动端口</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/8263441/62352160-0f3e9100-b53a-11e9-95ba-3ae3dde49c72.png"
width="60%" />
+ </p>
+A:1,修改 application_master.properties 配置文件,例如:server.port=12345。
+<hr>
+<h2>Q:调度任务不能上线</h2>
+<p>A:1,我们可以成功创建调度任务,并且表 t_scheduler_schedules 中也成功加入了一条记录,但当我点击上线后,前端页面无反应且会把
t_scheduler_schedules 这张表锁定,我测试过将 t_scheduler_schedules 中的 RELEASE_state
字段手动更新为 1 这样前端会显示为上线状态。DS 版本 1.2+ 表名是 t_ds_schedules,其它版本表名是
t_scheduler_schedules。</p>
+<hr>
+<h2>Q:请问 swagger ui 的地址是什么</h2>
+<p>A:1,1.2+ 版本地址是:<a
href="http://apiServerIp">http://apiServerIp</a>:apiServerPort/dolphinscheduler/doc.html?language=zh_CN&lang=cn,其它版本是
<a
href="http://apiServerIp">http://apiServerIp</a>:apiServerPort/escheduler/doc.html?language=zh_CN&lang=cn。</p>
+<hr>
+<h2>Q:前端安装包缺少文件</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/41460919/61437083-d960b080-a96e-11e9-87f1-297ba3aca5e3.png"
width="60%" />
+ </p>
+ <p align="center">
+ <img
src="https://user-images.githubusercontent.com/41460919/61437218-1b89f200-a96f-11e9-8e48-3fac47eb2389.png"
width="60%" />
+ </p>
+A: 1,用户修改了 api server
配置文件中的配置项,导致了这个问题,恢复成默认配置之后问题解决。
+<hr>
+<h2>Q:上传比较大的文件卡住</h2>
+<p align="center">
+ <img
src="https://user-images.githubusercontent.com/21357069/58231400-805b0e80-7d69-11e9-8107-7f37b06a95df.png"
width="60%" />
+ </p>
+A:1,编辑 nginx 配置文件 vi /etc/nginx/nginx.conf,更改上传大小 client_max_body_size 1024m。
+<p> 2,更新 google chrome 版本到最新版本。</p>
+<hr>
+<h2>Q:创建 spark 数据源,点击“测试连接”,系统回退回到登入页面</h2>
+<p>A:1,edit /etc/nginx/conf.d/escheduler.conf</p>
+<pre><code> proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+</code></pre>
+<hr>
+<h2>Q:欢迎订阅 DolphinScheduler 开发邮件列表</h2>
+<p>A:在使用 DolphinScheduler 的过程中,如果您有任何问题或者想法、建议,都可以通过 Apache 邮件列表参与到
DolphinScheduler 的社区建设中来。
+发送订阅邮件也非常简单,步骤如下:</p>
+<p>1,用自己的邮箱向 <a
href="mailto:[email protected]">[email protected]</a>
发送一封邮件,主题和内容任意。</p>
+<p>2, 接收确认邮件并回复。 完成步骤1后,您将收到一封来自 <a
href="mailto:[email protected]">[email protected]</a>
的确认邮件(如未收到,请确认邮件是否被自动归入垃圾邮件、推广邮件、订阅邮件等文件夹)。然后直接回复该邮件,或点击邮件里的链接快捷回复即可,主题和内容任意。</p>
+<p>3, 接收欢迎邮件。 完成以上步骤后,您会收到一封主题为 WELCOME to <a
href="mailto:[email protected]">[email protected]</a>
的欢迎邮件,至此您已成功订阅 Apache DolphinScheduler(Incubating) 的邮件列表。</p>
+<hr>
+<h2>Q:工作流依赖</h2>
+<p>A:1,目前是按照自然天来判断,上月末:判断时间是工作流 A start_time/scheduler_time between
'2019-05-31 00:00:00' and '2019-05-31 23:59:59'。上月:是判断上个月从 1
号到月末每天都要有完成的A实例。上周: 上周 7 天都要有完成的 A 实例。前两天: 判断昨天和前天,两天都要有完成的 A 实例。</p>
+<hr>
+<h2>Q:DS 后端接口文档</h2>
+<p>A:1,<a
href="http://106.75.43.194:8888/dolphinscheduler/doc.html?language=zh_CN&lang=zh%E3%80%82">http://106.75.43.194:8888/dolphinscheduler/doc.html?language=zh_CN&lang=zh。</a></p>
+<hr>
+<p>我们会持续收集更多的 FAQ。</p>
</div></section><footer class="footer-container"><div class="footer-body"><img
src="/img/ds_gray.svg"/><div class="cols-container"><div class="col
col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort
undergoing incubation at The Apache Software Foundation (ASF), sponsored by
Incubator.
Incubation is required of all newly accepted projects until a further review
indicates
that the infrastructure, communications, and decision making process have
stabilized in a manner consistent with other successful ASF projects.
diff --git a/zh-cn/docs/release/faq.json b/zh-cn/docs/release/faq.json
index f128e94..7b321e1 100644
--- a/zh-cn/docs/release/faq.json
+++ b/zh-cn/docs/release/faq.json
@@ -1,6 +1,6 @@
{
"filename": "faq.md",
- "__html": "<h2>Q:DolphinScheduler服务介绍及建议运行内存</h2>\n<p>A:
DolphinScheduler由5个服务组成,MasterServer、WorkerServer、ApiServer、AlertServer、LoggerServer和UI。</p>\n<table>\n<thead>\n<tr>\n<th>服务</th>\n<th>说明</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>MasterServer</td>\n<td>主要负责
<strong>DAG</strong>
的切分和任务状态的监控</td>\n</tr>\n<tr>\n<td>WorkerServer/LoggerServer</td>\n<td>主要负责任务的提交、执行和任务状态的更新。LoggerServer用于Rest
Api通过 <strong>RPC</strong>
查看日志</td>\n</tr>\n<tr>\n<td>ApiServer</td>\n<td>提供Rest Api服务,供UI进行 [...]
+ "__html": "<h2>Q:项目的名称是?</h2>\n<p>A:1.2 版本以前项目名称是 eschedule,1.2 以及之后的版本叫做
dolphinScheduler。</p>\n<hr>\n<h2>Q:DolphinScheduler
服务介绍及建议运行内存</h2>\n<p>A:DolphinScheduler 由 5
个服务组成,MasterServer、WorkerServer、ApiServer、AlertServer、LoggerServer 和
UI。</p>\n<table>\n<thead>\n<tr>\n<th>服务</th>\n<th>说明</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>MasterServer</td>\n<td>主要负责
<strong>DAG</strong>
的切分和任务状态的监控</td>\n</tr>\n<tr>\n<td>WorkerServer/LoggerServer</td>\n<td>主要负责任务的提交、执行和任务状态的更新。LoggerServer
用于 [...]
"link": "/zh-cn/docs/release/faq.html",
"meta": {}
}
\ No newline at end of file