status: stringclasses (1 value)
repo_name: stringclasses (31 values)
repo_url: stringclasses (31 values)
issue_id: int64 (1 to 104k)
title: stringlengths (4 to 233)
body: stringlengths (0 to 186k)
issue_url: stringlengths (38 to 56)
pull_url: stringlengths (37 to 54)
before_fix_sha: stringlengths (40 to 40)
after_fix_sha: stringlengths (40 to 40)
report_datetime: unknown
language: stringclasses (5 values)
commit_datetime: unknown
updated_file: stringlengths (7 to 188)
chunk_content: stringlengths (1 to 1.03M)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3457
[bug] flink args build problem
**Describe the bug**
DolphinScheduler 1.3.1 builds the Flink launch parameters in the wrong order.

**To Reproduce**
1. If the slot number and the taskManager number are set, the jar package cannot be found when the Flink task is executed.
2. Passing -yqu (the Flink on YARN queue name) or -ynm (the Flink application name) among the other parameters has no effect: adding extra launch parameters such as -yqu and -ynm scrambles the order in which the Flink launch parameters are assembled, as the client log below shows.

2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 */
public static final String MAIN_CLASS = "--class";
/** --driver-cores NUM */
public static final String DRIVER_CORES = "--driver-cores";
/** --driver-memory MEM */
public static final String DRIVER_MEMORY = "--driver-memory";
/** --num-executors NUM */
public static final String NUM_EXECUTORS = "--num-executors";
/** --executor-cores NUM */
public static final String EXECUTOR_CORES = "--executor-cores";
/** --executor-memory MEM */
public static final String EXECUTOR_MEMORY = "--executor-memory";
/** --queue QUEUE */
public static final String SPARK_QUEUE = "--queue";
/** --queue --qu */
public static final String FLINK_QUEUE = "--qu";
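The client log in the issue body above shows the root of the problem: the YARN options -yqu and -ynm are emitted after analysis-assembly-2.3.jar, and the FLINK_QUEUE value just shown ("--qu") also lands after the jar, so the Flink CLI forwards all of them to the user program instead of consuming them as client options. Below is a minimal, self-contained sketch of the ordering that flink run expects (YARN options before the jar, program arguments after it), built from the literal values in the log; it illustrates the constraint and is not the change made in PR #4166.

import java.util.ArrayList;
import java.util.List;

public class FlinkRunArgOrder {
    public static void main(String[] args) {
        // Options consumed by the Flink client must precede the user jar.
        List<String> cli = new ArrayList<>();
        cli.add("run");
        cli.add("-m");
        cli.add("yarn-cluster");
        cli.add("-yjm");
        cli.add("1G");
        cli.add("-ytm");
        cli.add("6G");
        cli.add("-yqu");  // YARN queue: only effective before the jar
        cli.add("test");
        cli.add("-ynm");  // YARN application name: only effective before the jar
        cli.add("DurationAndMileage");
        cli.add("-d");
        cli.add("-c");
        cli.add("cn.~~.analysis.DurationAndMileage");
        cli.add("analysis-assembly-2.3.jar");
        // Anything added after the jar is passed to the user program's main(),
        // which is where "--qu default", "-yqu test" and "-ynm ..." ended up in the buggy log.
        System.out.println(String.join(" ", cli));
    }
}

Running this prints a command line in which the queue and application name sit in front of the jar, so the Flink client, rather than the user program, receives them.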
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/** exit code success */
public static final int EXIT_CODE_SUCCESS = 0;
/** exit code kill */
public static final int EXIT_CODE_KILL = 137;
/** exit code failure */
public static final int EXIT_CODE_FAILURE = -1;
/** date format of yyyyMMdd */
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/** date format of yyyyMMddHHmmss */
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/** system date(yyyyMMddHHmmss) */
public static final String PARAMETER_DATETIME = "system.datetime";
/** system date(yyyymmdd) today */
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/** system date(yyyymmdd) yesterday
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 */
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/** ACCEPTED */
public static final String ACCEPTED = "ACCEPTED";
/** SUCCEEDED */
public static final String SUCCEEDED = "SUCCEEDED";
/** NEW */
public static final String NEW = "NEW";
/** NEW_SAVING */
public static final String NEW_SAVING = "NEW_SAVING";
/** SUBMITTED */
public static final String SUBMITTED = "SUBMITTED";
/** FAILED */
public static final String FAILED = "FAILED";
/** KILLED */
public static final String KILLED = "KILLED";
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/** RUNNING */
public static final String RUNNING = "RUNNING";
/** underline "_" */
public static final String UNDERLINE = "_";
/** quartz job prifix */
public static final String QUARTZ_JOB_PRIFIX = "job";
/** quartz job group prifix */
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/** projectId */
public static final String PROJECT_ID = "projectId";
/** processId */
public static final String SCHEDULE_ID = "scheduleId";
/** schedule */
public static final String SCHEDULE = "schedule";
/** application regex
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 */
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = OSUtils.isWindows() ? "handle" : "pid";
/** month_begin */
public static final String MONTH_BEGIN = "month_begin";
/** add_months */
public static final String ADD_MONTHS = "add_months";
/** month_end */
public static final String MONTH_END = "month_end";
/** week_begin */
public static final String WEEK_BEGIN = "week_begin";
/** week_end */
public static final String WEEK_END = "week_end";
/** timestamp */
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
/** master/worker server use for zk */
public static final String MASTER_PREFIX = "master";
public static final String WORKER_PREFIX = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/** preview schedule execute count */
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/** kerberos */
public static final String KERBEROS = "kerberos";
/** kerberos expire time */
public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";
/** java.security.krb5.conf */
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/** java.security.krb5.conf.path */
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/** hadoop.security.authentication */
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/** hadoop.security.authentication */
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/** com.amazonaws.services.s3.enableV4 */
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/** loginUserFromKeytab user
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 */
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/** default worker group id */
public static final int DEFAULT_WORKER_ID = -1;
/** loginUserFromKeytab path */
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/** task log info format */
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/** hive conf */
public static final String HIVE_CONF = "hiveconf:";
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_DETACH = "-d";
public static final String FLINK_MAIN_CLASS = "-c";
public static final int[] NOT_TERMINATED_STATES = new int[] {
    ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
    ExecutionStatus.RUNNING_EXECUTION.ordinal(),
    ExecutionStatus.DELAY_EXECUTION.ordinal(),
    ExecutionStatus.READY_PAUSE.ordinal(),
    ExecutionStatus.READY_STOP.ordinal(),
    ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
    ExecutionStatus.WAITTING_THREAD.ordinal(),
    ExecutionStatus.WAITTING_DEPEND.ordinal()
};
/** status */
public static final String STATUS = "status";
/** message */
public static final String MSG = "msg";
/** data total */
public static final String COUNT = "count";
/** page size */
public static final String PAGE_SIZE = "pageSize";
/** current page no */
public static final String PAGE_NUMBER = "pageNo";
/**
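The FLINK_* constants listed a chunk earlier (-m, -ys, -ynm, -yjm, -ytm, -d, -c, plus the yarn-cluster mode string) are the pieces from which DolphinScheduler assembles the flink run command line. Below is a minimal sketch, assuming plain method parameters (slots, appName, the memory sizes, queue, mainClass and mainJarPath are hypothetical names introduced here), of an assembly order in which every YARN option, including the queue and the application name, precedes the main class and the jar. It reuses only the constants shown in that chunk and hard-codes "-yqu" because no dedicated constant for it appears there; it illustrates the ordering constraint and is not necessarily the exact change applied in PR #4166.

import org.apache.dolphinscheduler.common.Constants;

import java.util.ArrayList;
import java.util.List;

/** Illustrative only: the leading "flink run" is assumed to be prepended elsewhere. */
public class FlinkArgOrderSketch {

    public static List<String> buildArgs(int slots, String appName, String jobManagerMemory,
                                         String taskManagerMemory, String queue,
                                         String mainClass, String mainJarPath) {
        List<String> args = new ArrayList<>();

        args.add(Constants.FLINK_RUN_MODE);        // "-m"
        args.add(Constants.FLINK_YARN_CLUSTER);    // "yarn-cluster"

        // YARN-session options must precede the jar; otherwise the Flink CLI
        // forwards them to the user program as plain arguments.
        args.add(Constants.FLINK_YARN_SLOT);       // "-ys"
        args.add(String.valueOf(slots));
        args.add(Constants.FLINK_APP_NAME);        // "-ynm"
        args.add(appName);
        args.add(Constants.FLINK_JOB_MANAGE_MEM);  // "-yjm"
        args.add(jobManagerMemory);
        args.add(Constants.FLINK_TASK_MANAGE_MEM); // "-ytm"
        args.add(taskManagerMemory);
        args.add("-yqu");                          // YARN queue; this chunk only defines FLINK_QUEUE = "--qu"
        args.add(queue);
        args.add(Constants.FLINK_DETACH);          // "-d"

        // Main class and jar come last; everything after the jar is a program argument.
        args.add(Constants.FLINK_MAIN_CLASS);      // "-c"
        args.add(mainClass);
        args.add(mainJarPath);
        return args;
    }
}

With the values from the log (appName "DurationAndMileage", queue "test", -yjm 1G, -ytm 6G), this ordering keeps the queue and application name in front of analysis-assembly-2.3.jar, which is what the reporter expected.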
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 */
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/** session user */
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/** driver */
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";
/** database type */
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
public static final String PRESTO = "PRESTO";
/** jdbc url */
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String JDBC_PRESTO = "jdbc:presto://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
/** session timeout */
public static final int SESSION_TIME_OUT = 7200;
public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/** dataSource sensitive param */
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
/** default worker group */
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/** new schedule time */
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/** authorize writable perm */
public static final int AUTHORIZE_WRITABLE_PERM = 7;
/** authorize readable perm */
public static final int AUTHORIZE_READABLE_PERM = 4;
/** plugin configurations */
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORAML_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
public static final String START_TIME = "start time";
public static final String END_TIME = "end time";
/** system line separator */
public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");
/** net system properties */
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
/** datasource encryption salt */
public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";
/** Network IP gets priority, default inner outer */
public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";
/** exec shell scripts */
public static final String SH = "sh";
/** pstree, get pud and sub pid */
public static final String PSTREE = "pstree";
}
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.utils;

import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;

import java.util.ArrayList;
import java.util.List;

/**
 * flink args utils
 */
public class FlinkArgsUtils {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
    private static final String LOCAL_DEPLOY_MODE = "local";

    private static final String FLINK_VERSION_BEFORE_1_10 = "<1.10";

    /**
     * build args
     * @param param flink parameters
     * @return argument list
     */
    public static List<String> buildArgs(FlinkParameters param) {
        List<String> args = new ArrayList<>();

        String deployMode = "cluster";
        String tmpDeployMode = param.getDeployMode();
        if (StringUtils.isNotEmpty(tmpDeployMode)) {
            deployMode = tmpDeployMode;
        }
        if (!LOCAL_DEPLOY_MODE.equals(deployMode)) {
            args.add(Constants.FLINK_RUN_MODE);
            args.add(Constants.FLINK_YARN_CLUSTER);

            int slot = param.getSlot();
            if (slot != 0) {
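As a quick illustration of this first branch, the following stand-alone sketch reproduces the deploy-mode handling with literal strings; the values "-m" and "yarn-cluster" for Constants.FLINK_RUN_MODE and Constants.FLINK_YARN_CLUSTER are inferred from the CliFrontend log quoted in the issue above, so treat them as assumptions rather than the project's actual constant definitions.

import java.util.ArrayList;
import java.util.List;

public class DeployModePrefixSketch {
    // Any deploy mode other than "local" is submitted to YARN: flink run -m yarn-cluster ...
    public static List<String> prefixFor(String deployMode) {
        List<String> args = new ArrayList<>();
        String effectiveMode = (deployMode == null || deployMode.isEmpty()) ? "cluster" : deployMode;
        if (!"local".equals(effectiveMode)) {
            args.add("-m");            // assumed value of Constants.FLINK_RUN_MODE
            args.add("yarn-cluster");  // assumed value of Constants.FLINK_YARN_CLUSTER
        }
        return args;
    }

    public static void main(String[] args) {
        System.out.println(prefixFor("cluster")); // [-m, yarn-cluster]
        System.out.println(prefixFor("local"));   // []
    }
}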
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
                args.add(Constants.FLINK_YARN_SLOT);
                args.add(String.format("%d", slot));
            }

            String appName = param.getAppName();
            if (StringUtils.isNotEmpty(appName)) {
                args.add(Constants.FLINK_APP_NAME);
                args.add(appName);
            }

            String flinkVersion = param.getFlinkVersion();
            if (FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
                int taskManager = param.getTaskManager();
                if (taskManager != 0) {
                    args.add(Constants.FLINK_TASK_MANAGE);
                    args.add(String.format("%d", taskManager));
                }
            }

            String jobManagerMemory = param.getJobManagerMemory();
            if (StringUtils.isNotEmpty(jobManagerMemory)) {
                args.add(Constants.FLINK_JOB_MANAGE_MEM);
                args.add(jobManagerMemory);
            }

            String taskManagerMemory = param.getTaskManagerMemory();
            if (StringUtils.isNotEmpty(taskManagerMemory)) {
                args.add(Constants.FLINK_TASK_MANAGE_MEM);
                args.add(taskManagerMemory);
            }

            args.add(Constants.FLINK_DETACH);
        }

        ProgramType programType = param.getProgramType();
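The literal flag values behind these Constants references are not visible in this chunk; the sketch below replays the ordering that the unit test at the end of this document asserts for a pre-1.10 Flink job, using the flags pinned down there.

import java.util.Arrays;
import java.util.List;

public class YarnOptionOrderSketch {
    public static void main(String[] args) {
        // Expected ordering for a Flink < 1.10 job; flag values are taken from the unit test below.
        List<String> before110 = Arrays.asList(
                "-m", "yarn-cluster",
                "-ys", "2",          // slots
                "-ynm", "testFlink", // application name
                "-yn", "4",          // task managers, only emitted for Flink < 1.10
                "-yjm", "4G",        // job manager memory
                "-ytm", "2G",        // task manager memory
                "-d");               // detached mode
        // For Flink 1.10+ the "-yn" pair is simply omitted.
        System.out.println(String.join(" ", before110));
    }
}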
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
        String mainClass = param.getMainClass();
        if (programType != null && programType != ProgramType.PYTHON && StringUtils.isNotEmpty(mainClass)) {
            args.add(Constants.FLINK_MAIN_CLASS);
            args.add(param.getMainClass());
        }

        ResourceInfo mainJar = param.getMainJar();
        if (mainJar != null) {
            args.add(mainJar.getRes());
        }

        String mainArgs = param.getMainArgs();
        if (StringUtils.isNotEmpty(mainArgs)) {
            args.add(mainArgs);
        }

        String others = param.getOthers();
        String queue = param.getQueue();
        if (StringUtils.isNotEmpty(others)) {
            if (!others.contains(Constants.FLINK_QUEUE) && StringUtils.isNotEmpty(queue) && !deployMode.equals(LOCAL_DEPLOY_MODE)) {
                args.add(Constants.FLINK_QUEUE);
                args.add(param.getQueue());
            }
            args.add(others);
        } else if (StringUtils.isNotEmpty(queue) && !deployMode.equals(LOCAL_DEPLOY_MODE)) {
            args.add(Constants.FLINK_QUEUE);
            args.add(param.getQueue());
        }

        return args;
    }
}
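A minimal usage sketch of buildArgs, assuming the FlinkParameters setters exercised in the unit test below; the application name, memory sizes, and queue are taken from the issue log, while the main class com.test is a stand-in for the redacted class name.

import java.util.List;

import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;

public class FlinkArgsUsageSketch {
    public static void main(String[] argv) {
        FlinkParameters param = new FlinkParameters();
        param.setDeployMode("cluster");
        param.setAppName("DurationAndMileage");
        param.setSlot(2);
        param.setJobManagerMemory("1G");
        param.setTaskManagerMemory("6G");
        param.setProgramType(ProgramType.JAVA);
        param.setMainClass("com.test");
        param.setQueue("test");

        List<String> args = FlinkArgsUtils.buildArgs(param);
        // Expected result for these inputs (no jar is appended because no main jar was set):
        // -m yarn-cluster -ys 2 -ynm DurationAndMileage -yjm 1G -ytm 6G -d -c com.test --qu test
        System.out.println(String.join(" ", args));
    }
}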
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task.flink;

import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;

import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * flink task
 */
public class FlinkTask extends AbstractYarnTask {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
    /**
     * flink command
     */
    private static final String FLINK_COMMAND = "flink";
    private static final String FLINK_RUN = "run";

    /**
     * flink parameters
     */
    private FlinkParameters flinkParameters;

    /**
     * taskExecutionContext
     */
    private TaskExecutionContext taskExecutionContext;

    public FlinkTask(TaskExecutionContext taskExecutionContext, Logger logger) {
        super(taskExecutionContext, logger);
        this.taskExecutionContext = taskExecutionContext;
    }

    @Override
    public void init() {
        logger.info("flink task params {}", taskExecutionContext.getTaskParams());

        flinkParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), FlinkParameters.class);

        if (!flinkParameters.checkParameters()) {
            throw new RuntimeException("flink task params is not valid");
        }
        flinkParameters.setQueue(taskExecutionContext.getQueue());
        setMainJarName();
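For orientation, a hedged sketch of the kind of payload init() consumes; the JSON field names are inferred from the FlinkParameters setters used in the unit test later in this document, and the exact schema stored in taskParams is an assumption, not a documented contract.

import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;

public class FlinkTaskParamsSketch {
    public static void main(String[] argv) {
        // Hypothetical taskParams payload; field names mirror the FlinkParameters setters seen elsewhere in this document.
        String taskParams = "{"
                + "\"deployMode\":\"cluster\","
                + "\"slot\":2,"
                + "\"appName\":\"testFlink\","
                + "\"jobManagerMemory\":\"1G\","
                + "\"taskManagerMemory\":\"6G\","
                + "\"programType\":\"JAVA\","
                + "\"mainClass\":\"com.test\","
                + "\"mainJar\":{\"id\":0,\"res\":\"analysis-assembly-2.3.jar\"}"
                + "}";
        FlinkParameters parsed = JSONUtils.parseObject(taskParams, FlinkParameters.class);
        System.out.println(parsed.getAppName()); // testFlink
    }
}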
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
        if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
            String args = flinkParameters.getMainArgs();

            Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                    taskExecutionContext.getDefinedParams(),
                    flinkParameters.getLocalParametersMap(),
                    CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                    taskExecutionContext.getScheduleTime());

            logger.info("param Map : {}", paramsMap);
            if (paramsMap != null) {
                args = ParameterUtils.convertParameterPlaceholders(args, ParamUtils.convert(paramsMap));
                logger.info("param args : {}", args);
            }
            flinkParameters.setMainArgs(args);
        }
    }

    /**
     * create command
     * @return command
     */
    @Override
    protected String buildCommand() {
        List<String> args = new ArrayList<>();

        args.add(FLINK_COMMAND);
        args.add(FLINK_RUN);
        logger.info("flink task args : {}", args);

        args.addAll(FlinkArgsUtils.buildArgs(flinkParameters));

        String command = ParameterUtils
                .convertParameterPlaceholders(String.join(" ", args), taskExecutionContext.getDefinedParams());
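To show what the joining step yields, here is a stand-alone sketch that mimics buildCommand() with a pre-built argument list; the argument values are the ones from the issue log, with the redacted main class replaced by the hypothetical com.test.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BuildCommandSketch {
    public static void main(String[] argv) {
        List<String> parts = new ArrayList<>();
        parts.add("flink");
        parts.add("run");
        parts.addAll(Arrays.asList("-m", "yarn-cluster", "-yjm", "1G", "-ytm", "6G", "-d",
                "-c", "com.test", "analysis-assembly-2.3.jar", "--qu", "test"));
        // buildCommand() additionally runs the joined string through
        // ParameterUtils.convertParameterPlaceholders so ${...} placeholders are resolved.
        System.out.println(String.join(" ", parts));
        // flink run -m yarn-cluster -yjm 1G -ytm 6G -d -c com.test analysis-assembly-2.3.jar --qu test
    }
}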
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
logger.info("flink task command : {}", command); return command; } @Override protected void setMainJarName() { ResourceInfo mainJar = flinkParameters.getMainJar(); if (mainJar != null) { int resourceId = mainJar.getId(); String resourceName; if (resourceId == 0) { resourceName = mainJar.getRes(); } else { Resource resource = processService.getResourceById(flinkParameters.getMainJar().getId()); if (resource == null) { logger.error("resource id: {} not exist", resourceId); throw new RuntimeException(String.format("resource id: %d not exist", resourceId)); } resourceName = resource.getFullName().replaceFirst("/", ""); } mainJar.setRes(resourceName); flinkParameters.setMainJar(mainJar); } } @Override public AbstractParameters getParameters() { return flinkParameters; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.utils;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;

import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;

/**
 * Test FlinkArgsUtils
 */
public class FlinkArgsUtilsTest {

    private static final Logger logger = LoggerFactory.getLogger(FlinkArgsUtilsTest.class);

    public String mode = "cluster";
    public int slot = 2;
    public String appName = "testFlink";
    public int taskManager = 4;
    public String taskManagerMemory = "2G";
    public String jobManagerMemory = "4G";
    public ProgramType programType = ProgramType.JAVA;
    public String mainClass = "com.test";
    public ResourceInfo mainJar = null;
    public String mainArgs = "testArgs";
    public String queue = "queue1";
    public String others = "--input file:///home";
    public String flinkVersion = "<1.10";

    @Before
    public void setUp() throws Exception {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java
        ResourceInfo main = new ResourceInfo();
        main.setRes("testflink-1.0.0-SNAPSHOT.jar");
        mainJar = main;
    }

    /**
     * Test buildArgs
     */
    @Test
    public void testBuildArgs() {
        FlinkParameters param = new FlinkParameters();
        param.setDeployMode(mode);
        param.setMainClass(mainClass);
        param.setAppName(appName);
        param.setSlot(slot);
        param.setTaskManager(taskManager);
        param.setJobManagerMemory(jobManagerMemory);
        param.setTaskManagerMemory(taskManagerMemory);
        param.setMainJar(mainJar);
        param.setProgramType(programType);
        param.setMainArgs(mainArgs);
        param.setQueue(queue);
        param.setOthers(others);
        param.setFlinkVersion(flinkVersion);

        List<String> result = FlinkArgsUtils.buildArgs(param);
        for (String s : result) {
            logger.info(s);
        }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,457
[bug] flink args build problem
**Describe the bug** There was a BUG in the Dolphin Scheduler-1.3.1 that Dolphin set Flink launch parameters **To Reproduce** 1. If the slot number and taskManager number are set, Jar package cannot be found when flink task is executed 2. Among other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (Flink appName) is invalid !!!! Setting other boot parameters, such as -yqu and -ynm, can cause confusion in the order in which the flink boot parameters are set 2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments: 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm 2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm 2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
https://github.com/apache/dolphinscheduler/issues/3457
https://github.com/apache/dolphinscheduler/pull/4166
68541f281d0b0908f605ad49847d3e7acdd5a302
cbc30b4900215424dcbbfb49539259d32273efc3
"2020-08-10T12:56:16Z"
java
"2020-12-10T14:37:21Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java
        assertEquals(20, result.size());

        assertEquals("-m", result.get(0));
        assertEquals("yarn-cluster", result.get(1));
        assertEquals("-ys", result.get(2));
        assertSame(Integer.valueOf(result.get(3)), slot);
        assertEquals("-ynm", result.get(4));
        assertEquals(result.get(5), appName);
        assertEquals("-yn", result.get(6));
        assertSame(Integer.valueOf(result.get(7)), taskManager);
        assertEquals("-yjm", result.get(8));
        assertEquals(result.get(9), jobManagerMemory);
        assertEquals("-ytm", result.get(10));
        assertEquals(result.get(11), taskManagerMemory);
        assertEquals("-d", result.get(12));
        assertEquals("-c", result.get(13));
        assertEquals(result.get(14), mainClass);
        assertEquals(result.get(15), mainJar.getRes());
        assertEquals(result.get(16), mainArgs);
        assertEquals("--qu", result.get(17));
        assertEquals(result.get(18), queue);
        assertEquals(result.get(19), others);

        FlinkParameters param1 = new FlinkParameters();
        param1.setQueue(queue);
        param1.setDeployMode(mode);
        result = FlinkArgsUtils.buildArgs(param1);
        assertEquals(5, result.size());
    }
}
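Joining the twenty asserted elements gives a compact view of the command tail this test pins down; the sketch below simply replays those literal values.

import java.util.Arrays;
import java.util.List;

public class ExpectedCommandTailSketch {
    public static void main(String[] args) {
        // The exact sequence asserted above; "--input file:///home" is a single "others" element.
        List<String> expected = Arrays.asList(
                "-m", "yarn-cluster", "-ys", "2", "-ynm", "testFlink", "-yn", "4",
                "-yjm", "4G", "-ytm", "2G", "-d", "-c", "com.test",
                "testflink-1.0.0-SNAPSHOT.jar", "testArgs", "--qu", "queue1", "--input file:///home");
        System.out.println(String.join(" ", expected));
        // -m yarn-cluster -ys 2 -ynm testFlink -yn 4 -yjm 4G -ytm 2G -d -c com.test testflink-1.0.0-SNAPSHOT.jar testArgs --qu queue1 --input file:///home
    }
}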
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task's group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to dispatch and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task's group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to dispatch and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.consumer;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.SqoopJobType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.task.procedure.ProcedureParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.task.sqoop.sources.SourceMysqlParameter;
import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParameter;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EnumUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder; import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.ProcedureTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskPriority; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher; import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext; import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType; import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; /** * TaskUpdateQueue consumer */ @Component
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
public class TaskPriorityQueueConsumer extends Thread { /** * logger of TaskUpdateQueueConsumer */ private static final Logger logger = LoggerFactory.getLogger(TaskPriorityQueueConsumer.class); /** * taskUpdateQueue */ @Autowired private TaskPriorityQueue taskPriorityQueue; /** * processService */ @Autowired private ProcessService processService; /** * executor dispatcher */ @Autowired private ExecutorDispatcher dispatcher; /** * master config */ @Autowired private MasterConfig masterConfig; @PostConstruct public void init() {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
super.setName("TaskUpdateQueueConsumerThread"); super.start(); } @Override public void run() { List<String> failedDispatchTasks = new ArrayList<>(); while (Stopper.isRunning()) { try { int fetchTaskNum = masterConfig.getMasterDispatchTaskNumber(); failedDispatchTasks.clear(); for (int i = 0; i < fetchTaskNum; i++) { if (taskPriorityQueue.size() <= 0) { Thread.sleep(Constants.SLEEP_TIME_MILLIS); continue; } String taskPriorityInfo = taskPriorityQueue.take(); TaskPriority taskPriority = TaskPriority.of(taskPriorityInfo); boolean dispatchResult = dispatch(taskPriority.getTaskId()); if (!dispatchResult) { failedDispatchTasks.add(taskPriorityInfo); } } for (String dispatchFailedTask : failedDispatchTasks) { taskPriorityQueue.put(dispatchFailedTask); } } catch (Exception e) { logger.error("dispatcher task error", e); } }
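The loop above re-queues tasks whose dispatch failed and immediately tries again, which is what floods the log when a task's group cannot be found. Below is a minimal, self-contained sketch of the back-off rule this issue proposes; the threshold of 10 and the 1-second sleep come from the issue text, and the class and method names are illustrative, not the merged PR.

```java
// Sketch of the proposed back-off, assuming the "< 10 tasks" and "1 second" values from the issue.
public final class DispatchBackoffSketch {

    private static final int QUEUE_SIZE_THRESHOLD = 10;   // "less than 10" in the proposal
    private static final long SLEEP_TIME_MILLIS = 1000L;  // "sleep for 1 second" in the proposal

    /** Sleep briefly when some tasks failed to dispatch and the queue is nearly empty. */
    public static void backOffIfNeeded(int failedDispatchCount, int queueSize) throws InterruptedException {
        if (failedDispatchCount > 0 && queueSize < QUEUE_SIZE_THRESHOLD) {
            Thread.sleep(SLEEP_TIME_MILLIS);
        }
    }

    private DispatchBackoffSketch() {
    }
}
```

In the consumer loop shown above, such a check would sit after the failed tasks are put back into taskPriorityQueue, so a repeated dispatch failure no longer spins through the while loop without pausing.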
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
} /** * dispatch task * * @param taskInstanceId taskInstanceId * @return result */ private boolean dispatch(int taskInstanceId) { boolean result = false; try { TaskExecutionContext context = getTaskExecutionContext(taskInstanceId); ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER, context.getWorkerGroup()); if (taskInstanceIsFinalState(taskInstanceId)) { return true; } else { result = dispatcher.dispatch(executionContext); } } catch (ExecuteException e) { logger.error("dispatch error", e); } return result; } /** * taskInstance is final state * success,failure,kill,stop,pause,threadwaiting is final state * * @param taskInstanceId taskInstanceId * @return taskInstance is final state */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
public Boolean taskInstanceIsFinalState(int taskInstanceId) { TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId); return taskInstance.getState().typeIsFinished(); } /** * get TaskExecutionContext * * @param taskInstanceId taskInstanceId * @return TaskExecutionContext */ protected TaskExecutionContext getTaskExecutionContext(int taskInstanceId) { TaskInstance taskInstance = processService.getTaskInstanceDetailByTaskId(taskInstanceId); // task type TaskType taskType = TaskType.valueOf(taskInstance.getTaskType()); // task node TaskNode taskNode = JSONUtils.parseObject(taskInstance.getTaskJson(), TaskNode.class); Integer userId = taskInstance.getProcessDefine() == null ? 0 : taskInstance.getProcessDefine().getUserId(); Tenant tenant = processService.getTenantForProcess(taskInstance.getProcessInstance().getTenantId(), userId); // verify tenant is null if (verifyTenantIsNull(tenant, taskInstance)) { processService.changeTaskState(taskInstance, ExecutionStatus.FAILURE, taskInstance.getStartTime(), taskInstance.getHost(), null, null, taskInstance.getId()); return null; } // set queue for process instance, user-specified queue takes precedence over tenant queue String userQueue = processService.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue); taskInstance.getProcessInstance().setTenantCode(tenant.getTenantCode()); taskInstance.setExecutePath(getExecLocalPath(taskInstance)); taskInstance.setResources(getResourceFullNames(taskNode)); SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext(); DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext(); ProcedureTaskExecutionContext procedureTaskExecutionContext = new ProcedureTaskExecutionContext(); SqoopTaskExecutionContext sqoopTaskExecutionContext = new SqoopTaskExecutionContext(); // SQL task if (taskType == TaskType.SQL) { setSQLTaskRelation(sqlTaskExecutionContext, taskNode); } // DATAX task if (taskType == TaskType.DATAX) { setDataxTaskRelation(dataxTaskExecutionContext, taskNode); } // procedure task if (taskType == TaskType.PROCEDURE) { setProcedureTaskRelation(procedureTaskExecutionContext, taskNode); } if (taskType == TaskType.SQOOP) { setSqoopTaskRelation(sqoopTaskExecutionContext, taskNode); } return TaskExecutionContextBuilder.get() .buildTaskInstanceRelatedInfo(taskInstance) .buildProcessInstanceRelatedInfo(taskInstance.getProcessInstance()) .buildProcessDefinitionRelatedInfo(taskInstance.getProcessDefine()) .buildSQLTaskRelatedInfo(sqlTaskExecutionContext) .buildDataxTaskRelatedInfo(dataxTaskExecutionContext) .buildProcedureTaskRelatedInfo(procedureTaskExecutionContext)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
.buildSqoopTaskRelatedInfo(sqoopTaskExecutionContext) .create(); } /** * set procedure task relation * * @param procedureTaskExecutionContext procedureTaskExecutionContext * @param taskNode taskNode */ private void setProcedureTaskRelation(ProcedureTaskExecutionContext procedureTaskExecutionContext, TaskNode taskNode) { ProcedureParameters procedureParameters = JSONUtils.parseObject(taskNode.getParams(), ProcedureParameters.class); int datasourceId = procedureParameters.getDatasource(); DataSource datasource = processService.findDataSourceById(datasourceId); procedureTaskExecutionContext.setConnectionParams(datasource.getConnectionParams()); } /** * set datax task relation * * @param dataxTaskExecutionContext dataxTaskExecutionContext * @param taskNode taskNode */ private void setDataxTaskRelation(DataxTaskExecutionContext dataxTaskExecutionContext, TaskNode taskNode) { DataxParameters dataxParameters = JSONUtils.parseObject(taskNode.getParams(), DataxParameters.class); DataSource dataSource = processService.findDataSourceById(dataxParameters.getDataSource()); DataSource dataTarget = processService.findDataSourceById(dataxParameters.getDataTarget()); if (dataSource != null) { dataxTaskExecutionContext.setDataSourceId(dataxParameters.getDataSource()); dataxTaskExecutionContext.setSourcetype(dataSource.getType().getCode()); dataxTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams()); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
if (dataTarget != null) { dataxTaskExecutionContext.setDataTargetId(dataxParameters.getDataTarget()); dataxTaskExecutionContext.setTargetType(dataTarget.getType().getCode()); dataxTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams()); } } /** * set sqoop task relation * * @param sqoopTaskExecutionContext sqoopTaskExecutionContext * @param taskNode taskNode */ private void setSqoopTaskRelation(SqoopTaskExecutionContext sqoopTaskExecutionContext, TaskNode taskNode) { SqoopParameters sqoopParameters = JSONUtils.parseObject(taskNode.getParams(), SqoopParameters.class); // sqoop job type is template set task relation if (sqoopParameters.getJobType().equals(SqoopJobType.TEMPLATE.getDescp())) { SourceMysqlParameter sourceMysqlParameter = JSONUtils.parseObject(sqoopParameters.getSourceParams(), SourceMysqlParameter.class); TargetMysqlParameter targetMysqlParameter = JSONUtils.parseObject(sqoopParameters.getTargetParams(), TargetMysqlParameter.class); DataSource dataSource = processService.findDataSourceById(sourceMysqlParameter.getSrcDatasource()); DataSource dataTarget = processService.findDataSourceById(targetMysqlParameter.getTargetDatasource()); if (dataSource != null) { sqoopTaskExecutionContext.setDataSourceId(dataSource.getId()); sqoopTaskExecutionContext.setSourcetype(dataSource.getType().getCode()); sqoopTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams()); } if (dataTarget != null) { sqoopTaskExecutionContext.setDataTargetId(dataTarget.getId()); sqoopTaskExecutionContext.setTargetType(dataTarget.getType().getCode()); sqoopTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams()); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
} } /** * set SQL task relation * * @param sqlTaskExecutionContext sqlTaskExecutionContext * @param taskNode taskNode */ private void setSQLTaskRelation(SQLTaskExecutionContext sqlTaskExecutionContext, TaskNode taskNode) { SqlParameters sqlParameters = JSONUtils.parseObject(taskNode.getParams(), SqlParameters.class); int datasourceId = sqlParameters.getDatasource(); DataSource datasource = processService.findDataSourceById(datasourceId); sqlTaskExecutionContext.setConnectionParams(datasource.getConnectionParams()); // whether udf type boolean udfTypeFlag = EnumUtils.isValidEnum(UdfType.class, sqlParameters.getType()) && StringUtils.isNotEmpty(sqlParameters.getUdfs()); if (udfTypeFlag) { String[] udfFunIds = sqlParameters.getUdfs().split(","); int[] udfFunIdsArray = new int[udfFunIds.length]; for (int i = 0; i < udfFunIds.length; i++) { udfFunIdsArray[i] = Integer.parseInt(udfFunIds[i]); } List<UdfFunc> udfFuncList = processService.queryUdfFunListByIds(udfFunIdsArray); Map<UdfFunc, String> udfFuncMap = new HashMap<>(); for (UdfFunc udfFunc : udfFuncList) { String tenantCode = processService.queryTenantCodeByResName(udfFunc.getResourceName(), ResourceType.UDF); udfFuncMap.put(udfFunc, tenantCode); } sqlTaskExecutionContext.setUdfFuncTenantCodeMap(udfFuncMap); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
} /** * get execute local path * * @return execute local path */ private String getExecLocalPath(TaskInstance taskInstance) { return FileUtils.getProcessExecDir(taskInstance.getProcessDefine().getProjectId(), taskInstance.getProcessDefine().getId(), taskInstance.getProcessInstance().getId(), taskInstance.getId()); } /** * whether tenant is null * * @param tenant tenant * @param taskInstance taskInstance * @return result */ private boolean verifyTenantIsNull(Tenant tenant, TaskInstance taskInstance) { if (tenant == null) { logger.error("tenant not exists,process instance id : {},task instance id : {}", taskInstance.getProcessInstance().getId(), taskInstance.getId()); return true; } return false; } /** * get resource map key is full name and value is tenantCode
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
*/ private Map<String, String> getResourceFullNames(TaskNode taskNode) { Map<String, String> resourceMap = new HashMap<>(); AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams()); if (baseParam != null) { List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList(); if (CollectionUtils.isNotEmpty(projectResourceFiles)) { // filter Set<ResourceInfo> oldVersionResources = projectResourceFiles.stream().filter(t -> t.getId() == 0).collect(Collectors.toSet()); if (CollectionUtils.isNotEmpty(oldVersionResources)) { oldVersionResources.forEach( (t) -> resourceMap.put(t.getRes(), processService.queryTenantCodeByResName(t.getRes(), ResourceType.FILE)) ); } // get the Stream<Integer> resourceIdStream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId()); Set<Integer> resourceIdsSet = resourceIdStream.collect(Collectors.toSet()); if (CollectionUtils.isNotEmpty(resourceIdsSet)) { Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]); List<Resource> resources = processService.listResourceByIds(resourceIds); resources.forEach( (t) -> resourceMap.put(t.getFullName(), processService.queryTenantCodeByResName(t.getFullName(), ResourceType.FILE)) ); } } } return resourceMap; } }
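The getResourceFullNames chunk above splits a task's resource files into legacy entries (id == 0, resolved by resource name) and current entries (resolved by id in one batch query). Below is a small, self-contained sketch of that split; the types and names are illustrative stand-ins, not DolphinScheduler's own classes.

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Illustrative sketch only; ResourceInfoSketch stands in for DolphinScheduler's ResourceInfo.
public class ResourceSplitSketch {

    static class ResourceInfoSketch {
        final int id;
        final String res;
        ResourceInfoSketch(int id, String res) { this.id = id; this.res = res; }
    }

    /** key true = legacy entries (id == 0) resolved by name; key false = entries resolved by id. */
    public static Map<Boolean, List<ResourceInfoSketch>> splitByVersion(List<ResourceInfoSketch> files) {
        return files.stream().collect(Collectors.partitioningBy(r -> r.id == 0));
    }
}
```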
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.consumer; import java.util.Date; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DbType;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher; import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.registry.DependencyConfig; import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager; import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter; import org.apache.dolphinscheduler.server.zk.SpringZKServer; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue; import org.apache.dolphinscheduler.service.zk.CuratorZookeeperClient; import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator; import org.apache.dolphinscheduler.service.zk.ZookeeperConfig; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; @RunWith(SpringJUnit4ClassRunner.class)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class, CuratorZookeeperClient.class, NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, TaskPriorityQueueConsumer.class, ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, MasterConfig.class, CuratorZookeeperClient.class}) public class TaskPriorityQueueConsumerTest { @Autowired private TaskPriorityQueue taskPriorityQueue; @Autowired private TaskPriorityQueueConsumer taskPriorityQueueConsumer; @Autowired private ProcessService processService; @Autowired private ExecutorDispatcher dispatcher; @Before public void init(){ Tenant tenant = new Tenant(); tenant.setId(1); tenant.setTenantCode("journey"); tenant.setDescription("journey");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
tenant.setQueueId(1); tenant.setCreateTime(new Date()); tenant.setUpdateTime(new Date()); Mockito.doReturn(tenant).when(processService).getTenantForProcess(1,2); Mockito.doReturn("default").when(processService).queryUserQueueByProcessInstanceId(1); } @Test public void testSHELLTask() throws Exception { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setTaskType("SHELL"); taskInstance.setProcessDefinitionId(1); taskInstance.setProcessInstanceId(1); taskInstance.setState(ExecutionStatus.KILL); taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo \\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}"); taskInstance.setProcessInstancePriority(Priority.MEDIUM); taskInstance.setWorkerGroup("default"); taskInstance.setExecutorId(2); ProcessInstance processInstance = new ProcessInstance(); processInstance.setTenantId(1); processInstance.setCommandType(CommandType.START_PROCESS); taskInstance.setProcessInstance(processInstance); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setUserId(2); processDefinition.setProjectId(1); taskInstance.setProcessDefine(processDefinition); Mockito.doReturn(taskInstance).when(processService).getTaskInstanceDetailByTaskId(1); taskPriorityQueue.put("2_1_2_1_default"); Thread.sleep(10000); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
@Test public void testSQLTask() throws Exception { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setTaskType("SQL"); taskInstance.setProcessDefinitionId(1); taskInstance.setProcessInstanceId(1); taskInstance.setState(ExecutionStatus.KILL); taskInstance.setTaskJson("{\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-3655\",\"maxRetryTimes\":0,\"name\":\"UDF测试\",\"params\":\"{\\\"postStatements\\\":[],\\\"connParams\\\":\\\"\\\",\\\"receiversCc\\\":\\\"\\\",\\\"udfs\\\":\\\"1\\\",\\\"type\\\":\\\"HIVE\\\",\\\"title\\\":\\\"test\\\",\\\"sql\\\":\\\"select id,name,ds,zodia(ds) from t_journey_user\\\",\\\"preStatements\\\":[],\\\"sqlType\\\":0,\\\"receivers\\\":\\\"[email protected]\\\",\\\"datasource\\\":3,\\\"showType\\\":\\\"TABLE\\\",\\\"localParams\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQL\"}"); taskInstance.setProcessInstancePriority(Priority.MEDIUM); taskInstance.setWorkerGroup("default"); taskInstance.setExecutorId(2); ProcessInstance processInstance = new ProcessInstance(); processInstance.setTenantId(1); processInstance.setCommandType(CommandType.START_PROCESS); taskInstance.setProcessInstance(processInstance); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setUserId(2); processDefinition.setProjectId(1); taskInstance.setProcessDefine(processDefinition); Mockito.doReturn(taskInstance).when(processService).getTaskInstanceDetailByTaskId(1); taskPriorityQueue.put("2_1_2_1_default"); DataSource dataSource = new DataSource(); dataSource.setId(1); dataSource.setName("sqlDatasource"); dataSource.setType(DbType.MYSQL); dataSource.setUserId(2); dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); dataSource.setCreateTime(new Date()); dataSource.setUpdateTime(new Date());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
Mockito.doReturn(dataSource).when(processService).findDataSourceById(1); Thread.sleep(10000); } @Test public void testDataxTask() throws Exception { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setTaskType("DATAX"); taskInstance.setProcessDefinitionId(1); taskInstance.setProcessInstanceId(1); taskInstance.setState(ExecutionStatus.KILL); taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-97625\",\"maxRetryTimes\":0,\"name\":\"MySQL数据相互导入\",\"params\":\"{\\\"targetTable\\\":\\\"pv2\\\",\\\"postStatements\\\":[],\\\"jobSpeedRecord\\\":1000,\\\"customConfig\\\":0,\\\"dtType\\\":\\\"MYSQL\\\",\\\"dsType\\\":\\\"MYSQL\\\",\\\"jobSpeedByte\\\":0,\\\"dataSource\\\":80,\\\"dataTarget\\\":80,\\\"sql\\\":\\\"SELECT dt,count FROM pv\\\",\\\"preStatements\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"DATAX\",\"workerGroup\":\"default\"}"); taskInstance.setProcessInstancePriority(Priority.MEDIUM); taskInstance.setWorkerGroup("default"); taskInstance.setExecutorId(2); ProcessInstance processInstance = new ProcessInstance(); processInstance.setTenantId(1); processInstance.setCommandType(CommandType.START_PROCESS); taskInstance.setProcessInstance(processInstance); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setUserId(2); processDefinition.setProjectId(1); taskInstance.setProcessDefine(processDefinition); Mockito.doReturn(taskInstance).when(processService).getTaskInstanceDetailByTaskId(1); taskPriorityQueue.put("2_1_2_1_default"); DataSource dataSource = new DataSource(); dataSource.setId(80); dataSource.setName("datax"); dataSource.setType(DbType.MYSQL); dataSource.setUserId(2);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); dataSource.setCreateTime(new Date()); dataSource.setUpdateTime(new Date()); Mockito.doReturn(dataSource).when(processService).findDataSourceById(80); Thread.sleep(10000); } @Test public void testSqoopTask() throws Exception { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setTaskType("SQOOP"); taskInstance.setProcessDefinitionId(1); taskInstance.setProcessInstanceId(1); taskInstance.setState(ExecutionStatus.KILL); taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-63634\",\"maxRetryTimes\":0,\"name\":\"MySQL数据导入HDSF\",\"params\":\"{\\\"sourceType\\\":\\\"MYSQL\\\",\\\"targetType\\\":\\\"HDFS\\\",\\\"targetParams\\\":\\\"{\\\\\\\"targetPath\\\\\\\":\\\\\\\"/test/datatest\\\\\\\",\\\\\\\"deleteTargetDir\\\\\\\":true,\\\\\\\"fileType\\\\\\\":\\\\\\\"--as-textfile\\\\\\\",\\\\\\\"compressionCodec\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"fieldsTerminated\\\\\\\":\\\\\\\",\\\\\\\",\\\\\\\"linesTerminated\\\\\\\":\\\\\\\"\\\\\\\\\\\\\\\\n\\\\\\\"}\\\",\\\"modelType\\\":\\\"import\\\",\\\"sourceParams\\\":\\\"{\\\\\\\"srcType\\\\\\\":\\\\\\\"MYSQL\\\\\\\",\\\\\\\"srcDatasource\\\\\\\":1,\\\\\\\"srcTable\\\\\\\":\\\\\\\"t_ds_user\\\\\\\",\\\\\\\"srcQueryType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcQuerySql\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcColumnType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcColumns\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcConditionList\\\\\\\":[],\\\\\\\"mapColumnHive\\\\\\\":[],\\\\\\\"mapColumnJava\\\\\\\":[]}\\\",\\\"localParams\\\":[],\\\"concurrency\\\":1}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQOOP\",\"workerGroup\":\"default\"}"); taskInstance.setProcessInstancePriority(Priority.MEDIUM); taskInstance.setWorkerGroup("default"); taskInstance.setExecutorId(2); ProcessInstance processInstance = new ProcessInstance(); processInstance.setTenantId(1); processInstance.setCommandType(CommandType.START_PROCESS); taskInstance.setProcessInstance(processInstance); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setUserId(2); processDefinition.setProjectId(1); taskInstance.setProcessDefine(processDefinition); Mockito.doReturn(taskInstance).when(processService).getTaskInstanceDetailByTaskId(1); taskPriorityQueue.put("2_1_2_1_default"); DataSource dataSource = new DataSource(); dataSource.setId(1);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,138
[Feature][Master] dispatch workgroup error add sleep time
If the task group cannot be found when running a task, a large number of logs will be output. ![image](https://user-images.githubusercontent.com/39816903/100732974-7ad38f80-3408-11eb-88b7-89d32a242acc.png) It is suggested to add a sleep operation: when there are tasks that failed to be dispatched and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
https://github.com/apache/dolphinscheduler/issues/4138
https://github.com/apache/dolphinscheduler/pull/4139
0039b1bfcbf822cb851898a1ceb4844fd6943731
b3120a74d2656f7ad2054ba8245262551063b549
"2020-12-01T11:08:26Z"
java
"2020-12-11T07:35:40Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java
dataSource.setName("datax"); dataSource.setType(DbType.MYSQL); dataSource.setUserId(2); dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); dataSource.setCreateTime(new Date()); dataSource.setUpdateTime(new Date()); Mockito.doReturn(dataSource).when(processService).findDataSourceById(1); Thread.sleep(10000); } @Test public void testTaskInstanceIsFinalState(){ TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setTaskType("SHELL"); taskInstance.setProcessDefinitionId(1); taskInstance.setProcessInstanceId(1); taskInstance.setState(ExecutionStatus.KILL); taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo \\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}"); taskInstance.setProcessInstancePriority(Priority.MEDIUM); taskInstance.setWorkerGroup("default"); taskInstance.setExecutorId(2); Mockito.doReturn(taskInstance).when(processService).findTaskInstanceById(1); taskPriorityQueueConsumer.taskInstanceIsFinalState(1); } @After public void close() { Stopper.stop(); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.entity; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand; import java.io.Serializable; import java.util.Date; import java.util.Map; import com.fasterxml.jackson.annotation.JsonFormat; /** * master/worker task transport */ public class TaskExecutionContext implements Serializable {
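The report above comes down to a missing guard: when the kill request targets a task that is still queued, no TaskExecutionContext has been cached on the worker, and dereferencing it causes both the NullPointerException and the null taskInstanceId sent back to the master. Below is a minimal, self-contained sketch of such a guard, with a plain map standing in for the worker-side cache; all names are illustrative and not the actual TaskKillProcessor API.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative guard only; contextCache stands in for the worker's TaskExecutionContext cache.
public class KillGuardSketch {

    private final Map<Integer, Object> contextCache = new ConcurrentHashMap<>();

    /**
     * Returns true only when the task has actually started on this worker,
     * i.e. a context (and therefore a processId) is cached for it.
     */
    public boolean canKillLocally(int taskInstanceId) {
        Object context = contextCache.get(taskInstanceId);
        if (context == null) {
            // task is still queued: nothing to kill here, and the response to the
            // master should carry the taskInstanceId from the request, not from the cache
            return false;
        }
        return true;
    }
}
```

With a check like this, the processor can answer the master using the taskInstanceId carried by the kill command itself instead of reading it from a null cache entry.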
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
/** * task id */ private int taskInstanceId; /** * task name */ private String taskName; /** * task first submit time.
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
*/ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date firstSubmitTime; /** * task start time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date startTime; /** * task type */ private String taskType; /** * host */ private String host; /** * task execute path */ private String executePath; /** * log path */ private String logPath; /** * task json */ private String taskJson; /** * processId
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
*/ private int processId; /** * appIds */ private String appIds; /** * process instance id */ private int processInstanceId; /** * process instance schedule time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date scheduleTime; /** * process instance global parameters */ private String globalParams; /** * execute user id */ private int executorId; /** * command type if complement */ private int cmdTypeIfComplement; /** * tenant code */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
private String tenantCode; /** * task queue */ private String queue; /** * process define id */ private int processDefineId; /** * project id */ private int projectId; /** * taskParams */ private String taskParams; /** * envFile */ private String envFile; /** * definedParams */ private Map<String, String> definedParams; /** * task AppId */ private String taskAppId; /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker for one of those queued tasks, TaskKillProcessor receives it and tries to get the processId from the cached TaskExecutionContext. Because the task instance is not running on this worker yet (it is still in the queue), TaskKillProcessor throws a NullPointerException. 2. The exception is caught inside that method, so TaskKillProcessor continues and builds the response command. In the method "buildKillTaskResponseCommand" it reads the taskInstanceId from the cached TaskExecutionContext, but the cache entry is null, so the command is sent to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
* task timeout strategy */ private int taskTimeoutStrategy; /** * task timeout */ private int taskTimeout; /** * worker group */ private String workerGroup; /** * delay execution time. */ private int delayTime; /** * current execution status */ private ExecutionStatus currentExecutionStatus; /** * resources full name and tenant code */ private Map<String, String> resources; /** * sql TaskExecutionContext */ private SQLTaskExecutionContext sqlTaskExecutionContext; /** * datax TaskExecutionContext */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
private DataxTaskExecutionContext dataxTaskExecutionContext; /** * dependence TaskExecutionContext */ private DependenceTaskExecutionContext dependenceTaskExecutionContext; /** * sqoop TaskExecutionContext */ private SqoopTaskExecutionContext sqoopTaskExecutionContext; /** * procedure TaskExecutionContext */ private ProcedureTaskExecutionContext procedureTaskExecutionContext; public int getTaskInstanceId() { return taskInstanceId; } public void setTaskInstanceId(int taskInstanceId) { this.taskInstanceId = taskInstanceId; } public String getTaskName() { return taskName; } public void setTaskName(String taskName) { this.taskName = taskName; } public Date getFirstSubmitTime() { return firstSubmitTime; } public void setFirstSubmitTime(Date firstSubmitTime) { this.firstSubmitTime = firstSubmitTime;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public Date getStartTime() { return startTime; } public void setStartTime(Date startTime) { this.startTime = startTime; } public String getTaskType() { return taskType; } public void setTaskType(String taskType) { this.taskType = taskType; } public String getHost() { return host; } public void setHost(String host) { this.host = host; } public String getExecutePath() { return executePath; } public void setExecutePath(String executePath) { this.executePath = executePath; } public String getLogPath() { return logPath; } public void setLogPath(String logPath) { this.logPath = logPath;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public String getTaskJson() { return taskJson; } public void setTaskJson(String taskJson) { this.taskJson = taskJson; } public int getProcessId() { return processId; } public void setProcessId(int processId) { this.processId = processId; } public String getAppIds() { return appIds; } public void setAppIds(String appIds) { this.appIds = appIds; } public int getProcessInstanceId() { return processInstanceId; } public void setProcessInstanceId(int processInstanceId) { this.processInstanceId = processInstanceId; } public Date getScheduleTime() { return scheduleTime; } public void setScheduleTime(Date scheduleTime) { this.scheduleTime = scheduleTime;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public String getGlobalParams() { return globalParams; } public void setGlobalParams(String globalParams) { this.globalParams = globalParams; } public int getExecutorId() { return executorId; } public void setExecutorId(int executorId) { this.executorId = executorId; } public int getCmdTypeIfComplement() { return cmdTypeIfComplement; } public void setCmdTypeIfComplement(int cmdTypeIfComplement) { this.cmdTypeIfComplement = cmdTypeIfComplement; } public String getTenantCode() { return tenantCode; } public void setTenantCode(String tenantCode) { this.tenantCode = tenantCode; } public String getQueue() { return queue; } public void setQueue(String queue) { this.queue = queue;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public int getProcessDefineId() { return processDefineId; } public void setProcessDefineId(int processDefineId) { this.processDefineId = processDefineId; } public int getProjectId() { return projectId; } public void setProjectId(int projectId) { this.projectId = projectId; } public String getTaskParams() { return taskParams; } public void setTaskParams(String taskParams) { this.taskParams = taskParams; } public String getEnvFile() { return envFile; } public void setEnvFile(String envFile) { this.envFile = envFile; } public Map<String, String> getDefinedParams() { return definedParams; } public void setDefinedParams(Map<String, String> definedParams) { this.definedParams = definedParams;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public String getTaskAppId() { return taskAppId; } public void setTaskAppId(String taskAppId) { this.taskAppId = taskAppId; } public int getTaskTimeoutStrategy() { return taskTimeoutStrategy; } public void setTaskTimeoutStrategy(int taskTimeoutStrategy) { this.taskTimeoutStrategy = taskTimeoutStrategy; } public int getTaskTimeout() { return taskTimeout; } public void setTaskTimeout(int taskTimeout) { this.taskTimeout = taskTimeout; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public int getDelayTime() { return delayTime; } public void setDelayTime(int delayTime) { this.delayTime = delayTime;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
} public ExecutionStatus getCurrentExecutionStatus() { return currentExecutionStatus; } public void setCurrentExecutionStatus(ExecutionStatus currentExecutionStatus) { this.currentExecutionStatus = currentExecutionStatus; } public SQLTaskExecutionContext getSqlTaskExecutionContext() { return sqlTaskExecutionContext; } public void setSqlTaskExecutionContext(SQLTaskExecutionContext sqlTaskExecutionContext) { this.sqlTaskExecutionContext = sqlTaskExecutionContext; } public DataxTaskExecutionContext getDataxTaskExecutionContext() { return dataxTaskExecutionContext; } public void setDataxTaskExecutionContext(DataxTaskExecutionContext dataxTaskExecutionContext) { this.dataxTaskExecutionContext = dataxTaskExecutionContext; } public ProcedureTaskExecutionContext getProcedureTaskExecutionContext() { return procedureTaskExecutionContext; } public void setProcedureTaskExecutionContext(ProcedureTaskExecutionContext procedureTaskExecutionContext) { this.procedureTaskExecutionContext = procedureTaskExecutionContext; } public Command toCommand() { TaskExecuteRequestCommand requestCommand = new TaskExecuteRequestCommand(); requestCommand.setTaskExecutionContext(JSONUtils.toJsonString(this)); return requestCommand.convert2Command(); }
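As a side note on the chunk above: toCommand() wraps the JSON-serialized context in a TaskExecuteRequestCommand. A minimal usage sketch, with invented field values, might look like this (illustrative only, not taken from the project):

    // Illustrative only: populate a context and turn it into a remote command.
    TaskExecutionContext context = new TaskExecutionContext();
    context.setTaskInstanceId(4172);                                      // invented id
    context.setTaskName("demo-task");                                     // invented name
    context.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);

    Command command = context.toCommand();  // JSON body wrapped in a TaskExecuteRequestCommand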
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
public DependenceTaskExecutionContext getDependenceTaskExecutionContext() { return dependenceTaskExecutionContext; } public void setDependenceTaskExecutionContext(DependenceTaskExecutionContext dependenceTaskExecutionContext) { this.dependenceTaskExecutionContext = dependenceTaskExecutionContext; } public Map<String, String> getResources() { return resources; } public void setResources(Map<String, String> resources) { this.resources = resources; } public SqoopTaskExecutionContext getSqoopTaskExecutionContext() { return sqoopTaskExecutionContext; } public void setSqoopTaskExecutionContext(SqoopTaskExecutionContext sqoopTaskExecutionContext) { this.sqoopTaskExecutionContext = sqoopTaskExecutionContext; } @Override public String toString() { return "TaskExecutionContext{" + "taskInstanceId=" + taskInstanceId + ", taskName='" + taskName + '\'' + ", currentExecutionStatus=" + currentExecutionStatus + ", firstSubmitTime=" + firstSubmitTime + ", startTime=" + startTime + ", taskType='" + taskType + '\'' + ", host='" + host + '\'' + ", executePath='" + executePath + '\'' + ", logPath='" + logPath + '\''
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/entity/TaskExecutionContext.java
+ ", taskJson='" + taskJson + '\'' + ", processId=" + processId + ", appIds='" + appIds + '\'' + ", processInstanceId=" + processInstanceId + ", scheduleTime=" + scheduleTime + ", globalParams='" + globalParams + '\'' + ", executorId=" + executorId + ", cmdTypeIfComplement=" + cmdTypeIfComplement + ", tenantCode='" + tenantCode + '\'' + ", queue='" + queue + '\'' + ", processDefineId=" + processDefineId + ", projectId=" + projectId + ", taskParams='" + taskParams + '\'' + ", envFile='" + envFile + '\'' + ", definedParams=" + definedParams + ", taskAppId='" + taskAppId + '\'' + ", taskTimeoutStrategy=" + taskTimeoutStrategy + ", taskTimeout=" + taskTimeout + ", workerGroup='" + workerGroup + '\'' + ", delayTime=" + delayTime + ", resources=" + resources + ", sqlTaskExecutionContext=" + sqlTaskExecutionContext + ", dataxTaskExecutionContext=" + dataxTaskExecutionContext + ", dependenceTaskExecutionContext=" + dependenceTaskExecutionContext + ", sqoopTaskExecutionContext=" + sqoopTaskExecutionContext + ", procedureTaskExecutionContext=" + procedureTaskExecutionContext + '}'; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/cache/TaskExecutionContextCacheManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.cache; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; /** * TaskExecutionContextCacheManager */ public interface TaskExecutionContextCacheManager {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/cache/TaskExecutionContextCacheManager.java
/** * get taskInstance by taskInstance id * * @param taskInstanceId taskInstanceId * @return taskInstance */ TaskExecutionContext getByTaskInstanceId(Integer taskInstanceId); /** * cache taskInstance * * @param taskExecutionContext taskExecutionContext */ void cacheTaskExecutionContext(TaskExecutionContext taskExecutionContext); /** * remove taskInstance by taskInstanceId * @param taskInstanceId taskInstanceId */ void removeByTaskInstanceId(Integer taskInstanceId); }
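Note that getByTaskInstanceId simply returns whatever is cached, so for a task that is still queued and was never cached on this worker it returns null, which is the situation the issue describes. A minimal caller-side guard could look like the sketch below (an illustration, not the project's actual fix):

    TaskExecutionContext context = taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId);
    if (context == null) {
        // Nothing was cached for this id on this worker (e.g. the task is still queued),
        // so there is no process to kill and no context-derived fields to read.
        logger.warn("no TaskExecutionContext cached for task instance {}", taskInstanceId);
        return;
    }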
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/cache/impl/TaskExecutionContextCacheManagerImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.cache.impl; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager; import org.springframework.stereotype.Service; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; /** * TaskExecutionContextCache */ @Service public class TaskExecutionContextCacheManagerImpl implements TaskExecutionContextCacheManager { /** * taskInstance cache */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/cache/impl/TaskExecutionContextCacheManagerImpl.java
private Map<Integer,TaskExecutionContext> taskExecutionContextCache = new ConcurrentHashMap<>(); /** * get taskInstance by taskInstance id * * @param taskInstanceId taskInstanceId * @return taskInstance */ @Override public TaskExecutionContext getByTaskInstanceId(Integer taskInstanceId) { return taskExecutionContextCache.get(taskInstanceId); } /** * cache taskInstance * * @param taskExecutionContext taskExecutionContext */ @Override public void cacheTaskExecutionContext(TaskExecutionContext taskExecutionContext) { taskExecutionContextCache.put(taskExecutionContext.getTaskInstanceId(),taskExecutionContext); } /** * remove taskInstance by taskInstanceId * @param taskInstanceId taskInstanceId */ @Override public void removeByTaskInstanceId(Integer taskInstanceId) { taskExecutionContextCache.remove(taskInstanceId); } }
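For illustration, the lifecycle this implementation supports is cache on start, look up by id, remove on completion; the ids and values in the sketch below are invented:

    TaskExecutionContextCacheManager cacheManager = new TaskExecutionContextCacheManagerImpl();

    TaskExecutionContext context = new TaskExecutionContext();
    context.setTaskInstanceId(4172);   // invented id
    context.setProcessId(12345);       // invented pid

    cacheManager.cacheTaskExecutionContext(context);                       // when the task starts running
    TaskExecutionContext cached = cacheManager.getByTaskInstanceId(4172);  // e.g. from TaskKillProcessor
    cacheManager.removeByTaskInstanceId(4172);                             // after the task finishes

    // ConcurrentHashMap.get returns null for an unknown key, which is why a kill request
    // for a still-queued task sees a null TaskExecutionContext.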
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.processor; import org.apache.dolphinscheduler.common.enums.Event; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.FileUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.Preconditions; import org.apache.dolphinscheduler.common.utils.RetryerUtils; import org.apache.dolphinscheduler.remote.command.Command;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand; import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand; import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.LogUtils; import org.apache.dolphinscheduler.server.worker.cache.ResponceCache; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.util.Date; import java.util.Optional; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.netty.channel.Channel; /** * worker request processor */ public class TaskExecuteProcessor implements NettyRequestProcessor { private final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class); /** * thread executor service */ private final ExecutorService workerExecService; /** * worker config */ private final WorkerConfig workerConfig; /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
* task callback service */ private final TaskCallbackService taskCallbackService; public TaskExecuteProcessor() { this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class); this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class); this.workerExecService = ThreadUtils.newDaemonFixedThreadExecutor("Worker-Execute-Thread", workerConfig.getWorkerExecThreads()); } @Override public void process(Channel channel, Command command) { Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(), String.format("invalid command type : %s", command.getType())); TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject( command.getBody(), TaskExecuteRequestCommand.class); logger.info("received command : {}", taskRequestCommand); if (taskRequestCommand == null) { logger.error("task execute request command is null"); return; } String contextJson = taskRequestCommand.getTaskExecutionContext(); TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class); if (taskExecutionContext == null) { logger.error("task execution context is null"); return; } Logger taskLogger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX, taskExecutionContext.getProcessDefineId(), taskExecutionContext.getProcessInstanceId(), taskExecutionContext.getTaskInstanceId()));
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
taskExecutionContext.setHost(NetUtils.getHost() + ":" + workerConfig.getListenPort()); taskExecutionContext.setStartTime(new Date()); taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext)); taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION); String execLocalPath = getExecLocalPath(taskExecutionContext); logger.info("task instance local execute path : {} ", execLocalPath); FileUtils.taskLoggerThreadLocal.set(taskLogger); try { FileUtils.createWorkDirAndUserIfAbsent(execLocalPath, taskExecutionContext.getTenantCode()); } catch (Throwable ex) { String errorLog = String.format("create execLocalPath : %s", execLocalPath); LoggerUtils.logError(Optional.ofNullable(logger), errorLog, ex); LoggerUtils.logError(Optional.ofNullable(taskLogger), errorLog, ex); } FileUtils.taskLoggerThreadLocal.remove(); taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(), new NettyRemoteChannel(channel, command.getOpaque())); this.doAck(taskExecutionContext); workerExecService.submit(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger)); } private void doAck(TaskExecutionContext taskExecutionContext){ TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext); ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(),ackCommand.convert2Command(),Event.ACK); taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command()); } /** * build ack command
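One observation on the chunk above: the context is acked and then submitted to the executor service, and a hypothetical way to make a still-queued task visible to TaskKillProcessor would be to put the context into the worker-side cache before submitting the thread. This is only a sketch of the idea; it assumes the processor holds a TaskExecutionContextCacheManager field, which is not shown above, and it is not necessarily what pull request #4182 changed.

    // Hypothetical addition inside TaskExecuteProcessor#process, before the submit call.
    taskExecutionContextCacheManager.cacheTaskExecutionContext(taskExecutionContext);
    workerExecService.submit(new TaskExecuteThread(taskExecutionContext, taskCallbackService, taskLogger));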
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
* @param taskExecutionContext taskExecutionContext * @return TaskExecuteAckCommand */ private TaskExecuteAckCommand buildAckCommand(TaskExecutionContext taskExecutionContext) { TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand(); ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId()); ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode()); ackCommand.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext)); ackCommand.setHost(taskExecutionContext.getHost()); ackCommand.setStartTime(taskExecutionContext.getStartTime()); if (taskExecutionContext.getTaskType().equals(TaskType.SQL.name()) || taskExecutionContext.getTaskType().equals(TaskType.PROCEDURE.name())) { ackCommand.setExecutePath(null); } else { ackCommand.setExecutePath(taskExecutionContext.getExecutePath()); } taskExecutionContext.setLogPath(ackCommand.getLogPath()); return ackCommand; } /** * get execute local path * @param taskExecutionContext taskExecutionContext * @return execute local path */ private String getExecLocalPath(TaskExecutionContext taskExecutionContext) { return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(), taskExecutionContext.getProcessDefineId(), taskExecutionContext.getProcessInstanceId(), taskExecutionContext.getTaskInstanceId()); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.processor; import org.apache.dolphinscheduler.common.Constants;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.Preconditions; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand; import org.apache.dolphinscheduler.remote.command.TaskKillResponseCommand; import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.remote.utils.Pair; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.log.LogClientService; import java.util.Collections; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.netty.channel.Channel; /** * task kill processor */ public class TaskKillProcessor implements NettyRequestProcessor { private final Logger logger = LoggerFactory.getLogger(TaskKillProcessor.class);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
/** * worker config */ private final WorkerConfig workerConfig; /** * task callback service */ private final TaskCallbackService taskCallbackService; /** * taskExecutionContextCacheManager */ private TaskExecutionContextCacheManager taskExecutionContextCacheManager; public TaskKillProcessor() { this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class); this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class); this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class); } /** * task kill process * * @param channel channel channel * @param command command command */ @Override public void process(Channel channel, Command command) { Preconditions.checkArgument(CommandType.TASK_KILL_REQUEST == command.getType(), String.format("invalid command type : %s", command.getType())); TaskKillRequestCommand killCommand = JSONUtils.parseObject(command.getBody(), TaskKillRequestCommand.class); logger.info("received kill command : {}", killCommand); Pair<Boolean, List<String>> result = doKill(killCommand); taskCallbackService.addRemoteChannel(killCommand.getTaskInstanceId(),
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
        new NettyRemoteChannel(channel, command.getOpaque()));

        TaskKillResponseCommand taskKillResponseCommand = buildKillTaskResponseCommand(killCommand, result);
        taskCallbackService.sendResult(taskKillResponseCommand.getTaskInstanceId(), taskKillResponseCommand.convert2Command());
        taskExecutionContextCacheManager.removeByTaskInstanceId(taskKillResponseCommand.getTaskInstanceId());
    }

    /**
     * do kill
     * @param killCommand
     * @return kill result
     */
    private Pair<Boolean, List<String>> doKill(TaskKillRequestCommand killCommand) {
        List<String> appIds = Collections.EMPTY_LIST;
        try {
            TaskExecutionContext taskExecutionContext = taskExecutionContextCacheManager.getByTaskInstanceId(killCommand.getTaskInstanceId());

            Integer processId = taskExecutionContext.getProcessId();
            if (processId == null || processId.equals(0)) {
                logger.error("process kill failed, process id :{}, task id:{}", processId, killCommand.getTaskInstanceId());
                return Pair.of(false, appIds);
            }

            String cmd = String.format("sudo kill -9 %s", ProcessUtils.getPidsStr(taskExecutionContext.getProcessId()));
            logger.info("process id:{}, cmd:{}", taskExecutionContext.getProcessId(), cmd);
            OSUtils.exeCmd(cmd);

            appIds = killYarnJob(Host.of(taskExecutionContext.getHost()).getIp(),
                    taskExecutionContext.getLogPath(),
                    taskExecutionContext.getExecutePath(),
                    taskExecutionContext.getTenantCode());

            return Pair.of(true, appIds);
        } catch (Exception e) {
            logger.error("kill task error", e);
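The NullPointerException described in the issue comes from dereferencing taskExecutionContext right after the cache lookup above, when the lookup returned null for a still-queued task. A defensive shape for the top of doKill could be the following sketch; it is an assumption and may differ from the change merged in pull request #4182:

    // Hypothetical guard at the start of doKill.
    TaskExecutionContext taskExecutionContext =
            taskExecutionContextCacheManager.getByTaskInstanceId(killCommand.getTaskInstanceId());
    if (taskExecutionContext == null) {
        logger.error("task execution context not found, task instance id:{}", killCommand.getTaskInstanceId());
        return Pair.of(false, Collections.emptyList());
    }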
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
        }

        return Pair.of(false, appIds);
    }

    /**
     * build TaskKillResponseCommand
     *
     * @param killCommand kill command
     * @param result exe result
     * @return build TaskKillResponseCommand
     */
    private TaskKillResponseCommand buildKillTaskResponseCommand(TaskKillRequestCommand killCommand, Pair<Boolean, List<String>> result) {
        TaskKillResponseCommand taskKillResponseCommand = new TaskKillResponseCommand();
        taskKillResponseCommand.setStatus(result.getLeft() ? ExecutionStatus.SUCCESS.getCode() : ExecutionStatus.FAILURE.getCode());
        taskKillResponseCommand.setAppIds(result.getRight());
        TaskExecutionContext taskExecutionContext = taskExecutionContextCacheManager.getByTaskInstanceId(killCommand.getTaskInstanceId());
        if (taskExecutionContext != null) {
            taskKillResponseCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
            taskKillResponseCommand.setHost(taskExecutionContext.getHost());
            taskKillResponseCommand.setProcessId(taskExecutionContext.getProcessId());
        }
        return taskKillResponseCommand;
    }

    /**
     * kill yarn job
     *
     * @param host host
     * @param logPath logPath
     * @param executePath executePath
     * @param tenantCode tenantCode
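Because the cached context can be null in buildKillTaskResponseCommand, the response can currently go back to the master without a task instance id. One hedged option, not necessarily what the linked pull request does, is to fall back to the id carried by the kill request itself:

    // Hypothetical fallback so the response always carries a usable task instance id.
    if (taskExecutionContext != null) {
        taskKillResponseCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId());
        taskKillResponseCommand.setHost(taskExecutionContext.getHost());
        taskKillResponseCommand.setProcessId(taskExecutionContext.getProcessId());
    } else {
        taskKillResponseCommand.setTaskInstanceId(killCommand.getTaskInstanceId());
    }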
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
* @return List<String> appIds */ private List<String> killYarnJob(String host, String logPath, String executePath, String tenantCode) { LogClientService logClient = null; try { logClient = new LogClientService(); logger.info("view log host : {},logPath : {}", host,logPath); String log = logClient.viewLog(host, Constants.RPC_PORT, logPath); if (StringUtils.isNotEmpty(log)) { List<String> appIds = LoggerUtils.getAppIds(log, logger); if (StringUtils.isEmpty(executePath)) { logger.error("task instance execute path is empty"); throw new RuntimeException("task instance execute path is empty"); } if (appIds.size() > 0) { ProcessUtils.cancelApplication(appIds, logger, tenantCode, executePath); return appIds; } } } catch (Exception e) { logger.error("kill yarn job error",e); } finally { if (logClient != null) { logClient.close(); } } return Collections.EMPTY_LIST; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.task; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, some tasks are still waiting in its queue. If the master then sends a kill command to this worker, TaskKillProcessor receives it and reads the processId from the cache (TaskExecutionContext). Because the task instance is not running on this worker yet but is still queued, TaskKillProcessor throws a NullPointerException.
2. That exception is caught inside the method, and TaskKillProcessor continues on to build the response command. In the method "buildKillTaskResponseCommand" the taskInstanceId is also read from the cache (TaskExecutionContext); since the cached context is null, the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
import org.apache.dolphinscheduler.service.process.ProcessService; import org.slf4j.Logger; import java.io.*; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS; /** * abstract command executor */ public abstract class AbstractCommandExecutor { /** * rules for extracting application ID */ protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX); protected StringBuilder varPool = new StringBuilder(); /** * process */ private Process process; /** * log handler */ protected Consumer<List<String>> logHandler;
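APPLICATION_REGEX above is used to pull YARN application ids out of task log lines. A self-contained sketch of that idea follows; the pattern string is an assumption, since the real one lives in Constants.APPLICATION_REGEX, and Pattern/Matcher are the standard java.util.regex classes already imported in this file:

    // Assumed pattern for illustration only.
    Pattern applicationRegex = Pattern.compile("application_\\d+_\\d+");
    Matcher matcher = applicationRegex.matcher("Submitted application application_1607328000000_0042"); // invented log line
    List<String> appIds = new ArrayList<>();
    while (matcher.find()) {
        appIds.add(matcher.group());   // e.g. "application_1607328000000_0042"
    }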
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
/** * logger */ protected Logger logger; /** * log list */ protected final List<String> logBuffer; /** * taskExecutionContext */ protected TaskExecutionContext taskExecutionContext; /** * taskExecutionContextCacheManager */ private TaskExecutionContextCacheManager taskExecutionContextCacheManager; public AbstractCommandExecutor(Consumer<List<String>> logHandler, TaskExecutionContext taskExecutionContext , Logger logger){ this.logHandler = logHandler; this.taskExecutionContext = taskExecutionContext; this.logger = logger; this.logBuffer = Collections.synchronizedList(new ArrayList<>()); this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class); } /** * build process * * @param commandFile command file * @throws IOException IO Exception
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
*/ private void buildProcess(String commandFile) throws IOException { List<String> command = new LinkedList<>(); ProcessBuilder processBuilder = new ProcessBuilder(); processBuilder.directory(new File(taskExecutionContext.getExecutePath())); processBuilder.redirectErrorStream(true); command.add("sudo"); command.add("-u"); command.add(taskExecutionContext.getTenantCode()); command.add(commandInterpreter()); command.addAll(commandOptions()); command.add(commandFile); processBuilder.command(command); process = processBuilder.start(); printCommand(command); } /** * task specific execution logic * * @param execCommand execCommand * @return CommandExecuteResult * @throws Exception if error throws Exception */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
public CommandExecuteResult run(String execCommand) throws Exception{ CommandExecuteResult result = new CommandExecuteResult(); if (StringUtils.isEmpty(execCommand)) { return result; } String commandFilePath = buildCommandFilePath(); createCommandFileIfNotExists(execCommand, commandFilePath); buildProcess(commandFilePath); parseProcessOutput(process); Integer processId = getProcessId(process); result.setProcessId(processId); taskExecutionContext.setProcessId(processId); taskExecutionContextCacheManager.cacheTaskExecutionContext(taskExecutionContext); logger.info("process start, process id is: {}", processId); long remainTime = getRemaintime(); boolean status = process.waitFor(remainTime, TimeUnit.SECONDS); logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}", taskExecutionContext.getExecutePath(), processId , result.getExitStatusCode()); if (status) {
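The run() chunk above caches the TaskExecutionContext (now carrying a processId) only after the child process has started, which is exactly the window issue 4172 describes: a kill request for a task that is still queued finds no usable context and dereferences null. Below is a minimal, self-contained sketch of a guarded kill path under that assumption; SimpleContextCache, handleKill and the ack strings are hypothetical stand-ins for illustration, not the real DolphinScheduler API and not the actual fix merged in PR 4182.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical, simplified model of the worker-side kill path described in issue 4172.
public class GuardedKillSketch {

    // Stand-in for the TaskExecutionContext cache: taskInstanceId -> processId.
    static class SimpleContextCache {
        private final Map<Integer, Integer> processIds = new ConcurrentHashMap<>();
        void cache(int taskInstanceId, int processId) { processIds.put(taskInstanceId, processId); }
        Integer getProcessId(int taskInstanceId) { return processIds.get(taskInstanceId); } // may be null
    }

    // Guarded handling: a queued (not yet running) task has no cached processId,
    // so answer with the id taken from the kill request instead of dereferencing null.
    static String handleKill(SimpleContextCache cache, int taskInstanceId) {
        Integer processId = cache.getProcessId(taskInstanceId);
        if (processId == null) {
            return "KILL-ACK taskInstanceId=" + taskInstanceId + " (not started, nothing to kill)";
        }
        // A real worker would kill the OS process here before acknowledging.
        return "KILL-ACK taskInstanceId=" + taskInstanceId + " processId=" + processId;
    }

    public static void main(String[] args) {
        SimpleContextCache cache = new SimpleContextCache();
        cache.cache(1, 4242);                     // task 1 is already running
        System.out.println(handleKill(cache, 1)); // acknowledged with its processId
        System.out.println(handleKill(cache, 2)); // task 2 still queued: no NPE, explicit ack
    }
}
```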
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
List<String> appIds = getAppIds(taskExecutionContext.getLogPath()); result.setAppIds(String.join(Constants.COMMA, appIds)); result.setExitStatusCode(process.exitValue()); if (process.exitValue() == 0){ result.setExitStatusCode(isSuccessOfYarnState(appIds) ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE); } } else { logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode()); ProcessUtils.kill(taskExecutionContext); result.setExitStatusCode(EXIT_CODE_FAILURE); } return result; } public String getVarPool() { return varPool.toString(); } /** * cancel application * @throws Exception exception */ public void cancelApplication() throws Exception { if (process == null) { return; } clear(); int processId = getProcessId(process); logger.info("cancel process: {}", processId);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
boolean killed = softKill(processId); if (!killed) { hardKill(processId); process.destroy(); process = null; } } /** * soft kill * @param processId process id * @return process is alive * @throws InterruptedException interrupted exception */ private boolean softKill(int processId) { if (processId != 0 && process.isAlive()) { try { String cmd = String.format("sudo kill %d", processId); logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.info("kill attempt failed", e); } } return process.isAlive(); } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
* hard kill * @param processId process id */ private void hardKill(int processId) { if (processId != 0 && process.isAlive()) { try { String cmd = String.format("sudo kill -9 %d", processId); logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.error("kill attempt failed ", e); } } } /** * print command * @param commands process builder */ private void printCommand(List<String> commands) { String cmdStr; try { cmdStr = ProcessUtils.buildCommandStr(commands); logger.info("task run command:\n{}", cmdStr); } catch (Exception e) { logger.error(e.getMessage(), e); } } /** * clear */
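softKill and hardKill above shell out to `sudo kill` / `sudo kill -9` because the child process runs under the tenant user. The escalation pattern itself can be illustrated with the plain JDK process API; the sketch below is only that, an illustration without the sudo/tenant handling, and it spawns the POSIX `sleep` command purely as a stand-in child.

```java
import java.time.Duration;
import java.util.concurrent.TimeUnit;

public class KillEscalationSketch {

    // Polite terminate first (roughly SIGTERM), force-kill if still alive after a grace period.
    static void killWithEscalation(Process process, Duration grace) throws InterruptedException {
        if (!process.isAlive()) {
            return;
        }
        process.destroy();                                              // ~ "kill <pid>"
        if (!process.waitFor(grace.toMillis(), TimeUnit.MILLISECONDS)) {
            process.destroyForcibly();                                  // ~ "kill -9 <pid>"
        }
    }

    public static void main(String[] args) throws Exception {
        Process p = new ProcessBuilder("sleep", "60").start();          // POSIX-only stand-in
        killWithEscalation(p, Duration.ofSeconds(2));
        p.waitFor();
        System.out.println("exit value: " + p.exitValue());
    }
}
```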
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
private void clear() { List<String> markerList = new ArrayList<>(); markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString()); if (!logBuffer.isEmpty()) { logHandler.accept(logBuffer); logBuffer.clear(); } logHandler.accept(markerList); } /** * get the standard output of the process * @param process process */ private void parseProcessOutput(Process process) { String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId()); ExecutorService parseProcessOutputExecutorService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName); parseProcessOutputExecutorService.submit(new Runnable(){ @Override public void run() { BufferedReader inReader = null; try { inReader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; long lastFlushTime = System.currentTimeMillis(); while ((line = inReader.readLine()) != null) { if (line.startsWith("${setValue(")) { varPool.append(line.substring("${setValue(".length(), line.length() - 2)); varPool.append("$VarPool$"); } else {
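The output-parsing loop above treats any line of the form `${setValue(key=value)}` as a var-pool entry: it strips the `${setValue(` prefix and the trailing `)}` and appends the remainder plus a `$VarPool$` separator. A standalone check of that substring arithmetic (the prefix and separator are copied from the chunk; the sample line is made up):

```java
public class VarPoolParseSketch {
    public static void main(String[] args) {
        String line = "${setValue(region=eu-west-1)}";         // example task output line
        StringBuilder varPool = new StringBuilder();
        if (line.startsWith("${setValue(")) {
            // keep everything between "${setValue(" and the trailing ")}"
            varPool.append(line.substring("${setValue(".length(), line.length() - 2));
            varPool.append("$VarPool$");                       // entry separator used by the executor
        }
        System.out.println(varPool);                           // prints: region=eu-west-1$VarPool$
    }
}
```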
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
logBuffer.add(line); lastFlushTime = flush(lastFlushTime); } } } catch (Exception e) { logger.error(e.getMessage(),e); } finally { clear(); close(inReader); } } }); parseProcessOutputExecutorService.shutdown(); } /** * check yarn state * * @param appIds application id list * @return is success of yarn task state */ public boolean isSuccessOfYarnState(List<String> appIds) { boolean result = true; try { for (String appId : appIds) { while(Stopper.isRunning()){ ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId); logger.info("appId:{}, final state:{}",appId,applicationStatus.name()); if (applicationStatus.equals(ExecutionStatus.FAILURE) || applicationStatus.equals(ExecutionStatus.KILL)) { return false;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
} if (applicationStatus.equals(ExecutionStatus.SUCCESS)){ break; } Thread.sleep(Constants.SLEEP_TIME_MILLIS); } } } catch (Exception e) { logger.error(String.format("yarn applications: %s status failed ", appIds.toString()),e); result = false; } return result; } public int getProcessId() { return getProcessId(process); } /** * get app ids * * @param logPath log path * @return app id list */ private List<String> getAppIds(String logPath) { List<String> logs = convertFile2List(logPath); List<String> appIds = new ArrayList<>(); /** * analyse the log and get the submitted yarn application ids */ for (String log : logs) { String appId = findAppId(log);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) { logger.info("find app id: {}", appId); appIds.add(appId); } } return appIds; } /** * convert file to list * @param filename file name * @return line list */ private List<String> convertFile2List(String filename) { List<String> lineList = new ArrayList<>(100); File file = new File(filename); if (!file.exists()){ return lineList; } BufferedReader br = null; try { br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8)); String line = null; while ((line = br.readLine()) != null) { lineList.add(line); } } catch (Exception e) { logger.error(String.format("read file: %s failed : ",filename),e); } finally { if(br != null){ try {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
br.close(); } catch (IOException e) { logger.error(e.getMessage(),e); } } } return lineList; } /** * find app id * @param line line * @return appid */ private String findAppId(String line) { Matcher matcher = APPLICATION_REGEX.matcher(line); if (matcher.find()) { return matcher.group(); } return null; } /** * get remain time(s) * * @return remain time */ private long getRemaintime() { long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000; long remainTime = taskExecutionContext.getTaskTimeout() - usedTime; if (remainTime < 0) { throw new RuntimeException("task execution time out");
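findAppId above runs Constants.APPLICATION_REGEX over a log line and returns the first match. The constant's value is not visible in these chunks, so the sketch below assumes a typical YARN id shape (application_<clusterTimestamp>_<sequence>); treat that regex as an assumption, not the verified constant.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FindAppIdSketch {
    // Assumed shape of a YARN application id; the real Constants.APPLICATION_REGEX may differ.
    private static final Pattern APPLICATION_REGEX = Pattern.compile("application_\\d+_\\d+");

    static String findAppId(String line) {
        Matcher matcher = APPLICATION_REGEX.matcher(line);
        return matcher.find() ? matcher.group() : null;
    }

    public static void main(String[] args) {
        String log = "INFO Client: Submitted application application_1607328000000_0042 to queue default";
        System.out.println(findAppId(log));                  // application_1607328000000_0042
        System.out.println(findAppId("no id on this line")); // null
    }
}
```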
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
} return remainTime; } /** * get process id * * @param process process * @return process id */ private int getProcessId(Process process) { int processId = 0; try { Field f = process.getClass().getDeclaredField(Constants.PID); f.setAccessible(true); processId = f.getInt(process); } catch (Throwable e) { logger.error(e.getMessage(), e); } return processId; } /** * when the log buffer size or the flush interval reaches its threshold, then flush * * @param lastFlushTime last flush time * @return last flush time */ private long flush(long lastFlushTime) { long now = System.currentTimeMillis(); /** * when the log buffer size or the flush interval reaches its threshold, then flush
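getProcessId above reaches into the JDK process implementation with reflection (Constants.PID presumably holds the private field name, an assumption here) because Java 8 offers no public pid accessor. On Java 9+ the same value is available via Process.pid(); a small comparison sketch, POSIX-only because it spawns `sleep`:

```java
import java.lang.reflect.Field;

public class ProcessIdSketch {

    // Reflection-based lookup mirroring the executor's approach; only works on JVMs whose
    // Process implementation really has an int "pid" field, hence the broad catch and 0 fallback.
    static int pidByReflection(Process process) {
        try {
            Field f = process.getClass().getDeclaredField("pid");
            f.setAccessible(true);
            return f.getInt(process);
        } catch (Exception e) {
            return 0;
        }
    }

    public static void main(String[] args) throws Exception {
        Process process = new ProcessBuilder("sleep", "1").start();  // POSIX-only stand-in
        System.out.println("reflection pid: " + pidByReflection(process));
        System.out.println("Process.pid():  " + process.pid());      // Java 9+ replacement
        process.waitFor();
    }
}
```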
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
*/ if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) { lastFlushTime = now; /* flush the buffered lines to the log handler */ logHandler.accept(logBuffer); logBuffer.clear(); } return lastFlushTime; } /** * close buffer reader * * @param inReader in reader */ private void close(BufferedReader inReader) { if (inReader != null) { try { inReader.close(); } catch (IOException e) { logger.error(e.getMessage(), e); } } } protected List<String> commandOptions() { return Collections.emptyList(); } protected abstract String buildCommandFilePath(); protected abstract String commandInterpreter(); protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException; }
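The hooks at the end of this chunk (commandOptions, buildCommandFilePath, commandInterpreter, createCommandFileIfNotExists) are the template methods that concrete executors fill in. The sketch below mirrors that shape with invented mini classes; it is not the real ShellCommandExecutor and deliberately omits the sudo/tenant switch, log capture and timeout handling shown in the chunks above.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Invented stand-in for the abstract executor: subclasses decide where the command file lives,
// which interpreter runs it, and how it is written.
abstract class MiniCommandExecutor {
    protected abstract String buildCommandFilePath();
    protected abstract String commandInterpreter();
    protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException;

    public Process run(String execCommand) throws IOException {
        String commandFile = buildCommandFilePath();
        createCommandFileIfNotExists(execCommand, commandFile);
        return new ProcessBuilder(commandInterpreter(), commandFile).start();
    }
}

// A shell-flavoured concrete executor (hypothetical, POSIX-only).
class MiniShellExecutor extends MiniCommandExecutor {
    private final Path workDir;
    MiniShellExecutor(Path workDir) { this.workDir = workDir; }

    @Override protected String buildCommandFilePath() { return workDir.resolve("task.command").toString(); }
    @Override protected String commandInterpreter() { return "sh"; }
    @Override protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
        Path path = Paths.get(commandFile);
        if (!Files.exists(path)) {
            Files.write(path, ("#!/bin/sh\n" + execCommand + "\n").getBytes(StandardCharsets.UTF_8));
        }
    }
}

public class MiniExecutorDemo {
    public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("mini-executor");
        Process p = new MiniShellExecutor(dir).run("echo hello from the command file");
        System.out.print(new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8));
        p.waitFor();
    }
}
```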
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.processor; import java.util.Date; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.remote.NettyRemotingClient; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand; import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand; import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand; import org.apache.dolphinscheduler.remote.config.NettyClientConfig; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor; import org.apache.dolphinscheduler.server.master.processor.queue.TaskResponseService; import org.apache.dolphinscheduler.server.master.registry.MasterRegistry; import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager; import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistry; import org.apache.dolphinscheduler.server.zk.SpringZKServer; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.zk.CuratorZookeeperClient; import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator; import org.apache.dolphinscheduler.service.zk.ZookeeperConfig; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import io.netty.channel.Channel; /** * test task call back service */ @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(classes={ TaskCallbackServiceTestConfig.class, SpringZKServer.class, SpringApplicationContext.class,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
MasterRegistry.class, WorkerRegistry.class, ZookeeperRegistryCenter.class, MasterConfig.class, WorkerConfig.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, ZookeeperNodeManager.class, TaskCallbackService.class, TaskResponseService.class, TaskAckProcessor.class, TaskResponseProcessor.class, TaskExecuteProcessor.class, CuratorZookeeperClient.class}) public class TaskCallbackServiceTest { @Autowired private TaskCallbackService taskCallbackService; @Autowired private MasterRegistry masterRegistry; @Autowired private TaskAckProcessor taskAckProcessor; @Autowired private TaskResponseProcessor taskResponseProcessor; @Autowired private TaskExecuteProcessor taskExecuteProcessor; /** * send ack test * @throws Exception */ @Test
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
public void testSendAck() throws Exception{ final NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(30000); NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig); nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor); nettyRemotingServer.start(); final NettyClientConfig clientConfig = new NettyClientConfig(); NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000")); taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1)); TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand(); ackCommand.setTaskInstanceId(1); ackCommand.setStartTime(new Date()); taskCallbackService.sendAck(1, ackCommand.convert2Command()); Stopper.stop(); nettyRemotingServer.close(); nettyRemotingClient.close(); } /** * send result test * @throws Exception */ @Test public void testSendResult() throws Exception{ final NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(30000); NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig); nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, taskResponseProcessor); nettyRemotingServer.start(); final NettyClientConfig clientConfig = new NettyClientConfig();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000")); taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1)); TaskExecuteResponseCommand responseCommand = new TaskExecuteResponseCommand(); responseCommand.setTaskInstanceId(1); responseCommand.setEndTime(new Date()); taskCallbackService.sendResult(1, responseCommand.convert2Command()); Thread.sleep(5000); Stopper.stop(); Thread.sleep(5000); nettyRemotingServer.close(); nettyRemotingClient.close(); } @Test public void testPause(){ Assert.assertEquals(5000, taskCallbackService.pause(3));; } @Test public void testSendAck1(){ masterRegistry.registry(); final NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(30000); NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig); nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor); nettyRemotingServer.start(); final NettyClientConfig clientConfig = new NettyClientConfig(); NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000")); taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1)); TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,172
TaskKillProcessor throw NullPointException
1. When a worker is running more than 100 tasks, the extra tasks wait in its queue. If the master then sends a kill command for one of those queued tasks, TaskKillProcessor receives it and looks up the processId in the cached TaskExecutionContext; because the task instance is not yet running on this worker, the lookup fails and TaskKillProcessor throws a NullPointerException. 2. That exception is caught inside the method, so TaskKillProcessor continues and builds the response command. In buildKillTaskResponseCommand the taskInstanceId is also read from the cached TaskExecutionContext, which is null, so the command is sent back to the master with a null taskInstanceId.
https://github.com/apache/dolphinscheduler/issues/4172
https://github.com/apache/dolphinscheduler/pull/4182
b3120a74d2656f7ad2054ba8245262551063b549
a13e737eb472b21b6b4c596ede9afc80fa3beb06
"2020-12-07T08:19:05Z"
java
"2020-12-11T07:41:35Z"
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java
ackCommand.setTaskInstanceId(1); ackCommand.setStartTime(new Date()); taskCallbackService.sendAck(1, ackCommand.convert2Command()); Assert.assertEquals(true, channel.isOpen()); Stopper.stop(); nettyRemotingServer.close(); nettyRemotingClient.close(); masterRegistry.unRegistry(); } @Test public void testTaskExecuteProcessor() throws Exception{ final NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(30000); NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig); nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, taskExecuteProcessor); nettyRemotingServer.start(); final NettyClientConfig clientConfig = new NettyClientConfig(); NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); TaskExecuteRequestCommand taskExecuteRequestCommand = new TaskExecuteRequestCommand(); nettyRemotingClient.send(new Host("localhost",30000),taskExecuteRequestCommand.convert2Command()); taskExecuteRequestCommand.setTaskExecutionContext(JSONUtils.toJsonString(new TaskExecutionContext())); nettyRemotingClient.send(new Host("localhost",30000),taskExecuteRequestCommand.convert2Command()); Thread.sleep(5000); Stopper.stop(); Thread.sleep(5000); nettyRemotingServer.close(); nettyRemotingClient.close(); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,215
[improvement] ProcessService invalid code removal
**Describe the question** The method org.apache.dolphinscheduler.service.process.ProcessService#checkTaskExistsInTaskQueue always returns false. **Which version of DolphinScheduler:** 1.1.3 **Additional context** ![image](https://user-images.githubusercontent.com/41229600/101970917-6002e580-3c68-11eb-84c1-135b943ec38b.png) **Requirement or improvement** - If the method can only ever return a constant value, is the business logic around it still complete? The invalid code should be removed.
https://github.com/apache/dolphinscheduler/issues/4215
https://github.com/apache/dolphinscheduler/pull/4221
b694474c78cafb298dd41d03cc0c54d2179dde2a
426eb9af034ff481f22d5da0a03662f6c9c7e32c
"2020-12-12T02:53:50Z"
java
"2020-12-13T15:05:27Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,215
[improvement] ProcessService invalid code removal
**Describe the question** The method org.apache.dolphinscheduler.service.process.ProcessService#checkTaskExistsInTaskQueue always returns false. **Which version of DolphinScheduler:** 1.1.3 **Additional context** ![image](https://user-images.githubusercontent.com/41229600/101970917-6002e580-3c68-11eb-84c1-135b943ec38b.png) **Requirement or improvement** - If the method can only ever return a constant value, is the business logic around it still complete? The invalid code should be removed.
https://github.com/apache/dolphinscheduler/issues/4215
https://github.com/apache/dolphinscheduler/pull/4221
b694474c78cafb298dd41d03cc0c54d2179dde2a
426eb9af034ff481f22d5da0a03662f6c9c7e32c
"2020-12-12T02:53:50Z"
java
"2020-12-13T15:05:27Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.CycleEnum; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.CycleDependency; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Resource;
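Issue 4215 above concerns a ProcessService method (checkTaskExistsInTaskQueue) whose body can only return false, leaving every branch guarded by it dead. The original method body is not included in these chunks, so the following is a hypothetical illustration of the pattern and of the straightforward cleanup (drop the constant check); the signature and the caller are invented for the example.

```java
public class DeadCheckSketch {

    // Hypothetical shape of the problem: the guard can never be true.
    static boolean checkTaskExistsInTaskQueue(String taskZkInfo) {
        return false;   // constant result, regardless of the input
    }

    static void submitTaskOldStyle(String taskZkInfo) {
        if (checkTaskExistsInTaskQueue(taskZkInfo)) {
            // dead branch: unreachable because the check above is always false
            System.out.println("skip, already queued: " + taskZkInfo);
            return;
        }
        System.out.println("enqueue " + taskZkInfo);
    }

    // After the cleanup the constant check is simply removed; behaviour is unchanged.
    static void submitTaskCleaned(String taskZkInfo) {
        System.out.println("enqueue " + taskZkInfo);
    }

    public static void main(String[] args) {
        submitTaskOldStyle("2_1_101");
        submitTaskCleaned("2_1_101");
    }
}
```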